Diffstat (limited to 'drivers/dma/dw_dmac.c')
-rw-r--r--  drivers/dma/dw_dmac.c  1122
1 file changed, 1122 insertions, 0 deletions

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
new file mode 100644
index 000000000000..94df91771243
--- /dev/null
+++ b/drivers/dma/dw_dmac.c
@@ -0,0 +1,1122 @@
/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems).
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller"
 * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

/* NOTE: DMS+SMS is system-specific. We should get this information
 * from the platform code somehow.
 */
#define DWC_DEFAULT_CTLLO	(DWC_CTLL_DST_MSIZE(0)	\
				| DWC_CTLL_SRC_MSIZE(0)	\
				| DWC_CTLL_DMS(0)	\
				| DWC_CTLL_SMS(1)	\
				| DWC_CTLL_LLP_D_EN	\
				| DWC_CTLL_LLP_S_EN)

/*
 * This is configuration-dependent and usually a funny size like 4095.
 * Let's round it down to the nearest power of two.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 8192 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	2048U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

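/* Return the descriptor at the head of the active list (dwc->lock held). */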
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

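/* Return the first descriptor waiting on the queue (dwc->lock held). */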
static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->queue.next, struct dw_desc, desc_node);
}

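/*
 * Grab a descriptor from the channel's free list, skipping any that
 * the client has not yet acknowledged. Returns NULL if none are
 * available; the caller is expected to cope with that.
 */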
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dwc->lock);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dwc->lock);

	dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);

	return ret;
}

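/*
 * Make the LLIs of a descriptor chain visible to the CPU again before
 * we read or modify them; the mirror image of the sync-for-device
 * calls done when the chain was handed to the controller.
 */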
static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->txd.tx_list, desc_node)
		dma_sync_single_for_cpu(dwc->chan.dev.parent,
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(dwc->chan.dev.parent,
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_bh(&dwc->lock);
		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			dev_vdbg(&dwc->chan.dev,
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->txd.tx_list, &dwc->free_list);
		dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_bh(&dwc->lock);
	}
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(&dwc->chan.dev,
			"BUG: Attempted to start non-idle channel\n");
		dev_err(&dwc->chan.dev,
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

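/*
 * Retire one descriptor: record its cookie as completed, return the
 * chain to the free list, unmap the buffers and invoke the client's
 * callback, if any.
 */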
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);

	dwc->completed = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	dwc_sync_desc_for_cpu(dwc, desc);
	list_splice_init(&txd->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	/*
	 * We use dma_unmap_page() regardless of how the buffers were
	 * mapped before they were submitted...
	 */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
		dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
				DMA_FROM_DEVICE);
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
				DMA_TO_DEVICE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

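/*
 * Called when the transfer-complete interrupt fires: everything on the
 * active list is done, so restart the channel from the queue and
 * retire the finished descriptors.
 */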
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(&dwc->chan.dev,
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&dwc->queue))
		dwc_dostart(dwc, dwc_first_queued(dwc));
	list_splice_init(&dwc->active_list, &list);
	list_splice_init(&dwc->queue, &dwc->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}

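/*
 * Walk the active list and compare each LLI against the channel's LLP
 * register: descriptors before the one the controller is currently
 * fetching must be complete and can be retired.
 */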
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;

	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		dwc_complete_all(dw, dwc);
		return;
	}

	dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		if (desc->lli.llp == llp)
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			if (child->lli.llp == llp)
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		dwc_descriptor_complete(dwc, desc);
	}

	dev_err(&dwc->chan.dev,
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		dwc_dostart(dwc, dwc_first_queued(dwc));
		list_splice_init(&dwc->queue, &dwc->active_list);
	}
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, &dwc->chan.dev,
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;

	dwc_scan_descriptors(dw, dwc);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_splice_init(&dwc->queue, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, &dwc->chan.dev,
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, &dwc->chan.dev,
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc);
}

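/*
 * Tasklet doing the actual per-channel work: handle errors and scan
 * for completed descriptors, then re-enable the interrupts that the
 * hard handler masked off.
 */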
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		spin_lock(&dwc->lock);
		if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
		spin_unlock(&dwc->lock);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

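/*
 * Hard interrupt handler: mask further interrupts and defer the real
 * work to the tasklet.
 */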
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

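/*
 * dmaengine tx_submit hook: assign a cookie and either start the
 * descriptor immediately (channel idle) or put it on the queue.
 */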
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dwc->lock);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
				desc->txd.cookie);
		dwc_dostart(dwc, desc);
		list_add_tail(&desc->desc_node, &dwc->active_list);
	} else {
		dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_bh(&dwc->lock);

	return cookie;
}

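/*
 * Prepare a memory-to-memory transfer, splitting it into a chain of
 * hardware descriptors of at most DWC_MAX_COUNT transfer units each.
 */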
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctllo;

	dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan->dev.parent,
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->txd.tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan->dev.parent,
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

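/*
 * Prepare a slave transfer to or from a peripheral FIFO, one hardware
 * descriptor per scatterlist entry.
 */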
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = dwc->dws;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(&chan->dev, "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->slave.reg_width;
	prev = first = NULL;

	sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);

	switch (direction) {
	case DMA_TO_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC_M2P);
		reg = dws->slave.tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len;
			u32 mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(&chan->dev,
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			desc->lli.ctlhi = len >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan->dev.parent,
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->txd.tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC_P2M);

		reg = dws->slave.rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len;
			u32 mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(&chan->dev,
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			desc->lli.ctlhi = len >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan->dev.parent,
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->txd.tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan->dev.parent,
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static void dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);

	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_bh(&dwc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}

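/*
 * Report the completion status of a cookie; if it isn't complete yet,
 * scan the descriptor lists once and check again.
 */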
static enum dma_status
dwc_is_tx_complete(struct dma_chan *chan,
		dma_cookie_t cookie,
		dma_cookie_t *done, dma_cookie_t *used)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return ret;
}

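/*
 * Descriptors are started in tx_submit; here we just rescan the
 * channel, which will kick off anything still sitting on the queue.
 */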
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
	spin_unlock_bh(&dwc->lock);
}

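/*
 * Set up the channel for a new client: program the slave configuration
 * (if any), preallocate descriptors and enable the channel interrupts.
 */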
static int dwc_alloc_chan_resources(struct dma_chan *chan,
		struct dma_client *client)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	struct dma_slave *slave;
	struct dw_dma_slave *dws;
	int i;
	u32 cfghi;
	u32 cfglo;

	dev_vdbg(&chan->dev, "alloc_chan_resources\n");

	/* Channels doing slave DMA can only handle one client. */
	if (dwc->dws || client->slave) {
		if (chan->client_count)
			return -EBUSY;
	}

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(&chan->dev, "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	cfghi = DWC_CFGH_FIFO_MODE;
	cfglo = 0;

	slave = client->slave;
	if (slave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);

		dws = container_of(slave, struct dw_dma_slave, slave);

		dwc->dws = dws;
		cfghi = dws->cfg_hi;
		cfglo = dws->cfg_lo;
	} else {
		dwc->dws = NULL;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_bh(&dwc->lock);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&dwc->lock);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(&chan->dev,
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dwc->lock);
			break;
		}

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		INIT_LIST_HEAD(&desc->txd.tx_list);
		desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_bh(&dwc->lock);
		i = ++dwc->descs_allocated;
	}

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	dev_dbg(&chan->dev,
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

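/*
 * Undo dwc_alloc_chan_resources(): the channel must be idle, with no
 * descriptors left on the active list or queue.
 */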
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_bh(&dwc->lock);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->dws = NULL;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(&chan->dev, "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan->dev.parent, desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(&chan->dev, "free_chan_resources done\n");
}

/*----------------------------------------------------------------------*/

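/* Disable the controller globally and mask all channel interrupts. */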
static void dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}

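/*
 * Probe: map the registers, enable the clock, reset the controller,
 * set up all channels and register with the dmaengine core.
 */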
static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	int irq;
	int err;
	int i;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}
	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->completed = 1;
		dwc->chan.chan_id = i;
		list_add_tail(&dwc->chan.device_node, &dw->dma.channels);

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_is_tx_complete = dwc_is_tx_complete;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			pdev->dev.bus_id, dw->dma.chancnt);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_mem_region(io->start, DW_REGLEN);
err_kfree:
	kfree(dw);
	return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;
	struct resource *io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(dw);
	clk_disable(dw->clk);
}

static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(dw);
	clk_disable(dw->clk);
	return 0;
}

static int dw_resume_early(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.suspend_late	= dw_suspend_late,
	.resume_early	= dw_resume_early,
	.driver = {
		.name	= "dw_dmac",
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
module_init(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
