| author | Guennadi Liakhovetski <g.liakhovetski@gmx.de> | 2012-05-09 11:09:21 -0400 |
|---|---|---|
| committer | Vinod Koul <vinod.koul@linux.intel.com> | 2012-07-12 23:43:08 -0400 |
| commit | ce3a1ab74264b860450709e4bd0dcfc2d0bfc7f8 (patch) | |
| tree | 445fdd53ae7775810d0b05ca90c392ee25367889 /drivers/dma/sh | |
| parent | b8373147ed3ca01a968d81f22688f2836a9aeb6b (diff) | |
dma: shdma: convert to the shdma base library
The shdma base library was originally extracted from the shdma driver,
which can now be converted to actually use it.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/dma/sh')
| -rw-r--r-- | drivers/dma/sh/shdma.c | 1122 |
| -rw-r--r-- | drivers/dma/sh/shdma.h | 44 |
2 files changed, 292 insertions, 874 deletions
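For orientation, the conversion boils down to one pattern repeated across the hunks below: the driver embeds a `struct shdma_chan` inside its own channel structure, recovers the driver channel in every callback with `container_of()`, and hands a `struct shdma_ops` table plus its per-descriptor size to `shdma_init()`, which then owns descriptor and cookie management. The following is a minimal sketch of that pattern distilled from this diff — not a compilable excerpt of the driver, and the helper `sh_dmae_setup_ops()` is invented here purely for illustration:

```c
#include <linux/shdma-base.h>	/* shdma_chan, shdma_ops, shdma_init() */

/* Driver channel: the generic shdma_chan is embedded, not pointed to. */
struct sh_dmae_chan {
	struct shdma_chan shdma_chan;
	void __iomem *base;		/* channel register window */
	char dev_id[16];		/* IRQ name, e.g. "sh-dmae0.1" */
};

/*
 * Every base-library callback receives the generic channel and gets
 * back to the hardware-specific one via container_of().
 */
static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan =
		container_of(schan, struct sh_dmae_chan, shdma_chan);

	dmae_halt(sh_chan);		/* register-level channel stop */
}

static const struct shdma_ops sh_dmae_shdma_ops = {
	.halt_channel	= sh_dmae_halt,
	/* ...the remaining hardware hooks; see the full table in the diff... */
};

/*
 * Hypothetical probe helper showing the registration step: the base
 * library allocates descriptors of desc_size and drives the ops table.
 */
static int sh_dmae_setup_ops(struct platform_device *pdev,
			     struct sh_dmae_device *shdev,
			     struct sh_dmae_pdata *pdata)
{
	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	return shdma_init(&pdev->dev, &shdev->shdma_dev,
			  pdata->channel_num);
}
```

The payoff is visible in the deletion count: the `DESC_*` state machine, the `ld_queue`/`ld_free` bookkeeping, the completion tasklet and the per-channel runtime-PM handling all move into the base library, leaving the driver with register-level hooks only.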
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index 8ab4a1f5d3c1..c393b354e2b3 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -3,6 +3,7 @@
| 3 | * | 3 | * |
| 4 | * base is drivers/dma/flsdma.c | 4 | * base is drivers/dma/flsdma.c |
| 5 | * | 5 | * |
| 6 | * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
| 6 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | 7 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> |
| 7 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | 8 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. |
| 8 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 9 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. |
@@ -34,18 +35,12 @@
| 34 | #include "../dmaengine.h" | 35 | #include "../dmaengine.h" |
| 35 | #include "shdma.h" | 36 | #include "shdma.h" |
| 36 | 37 | ||
| 37 | /* DMA descriptor control */ | 38 | #define SH_DMAE_DRV_NAME "sh-dma-engine" |
| 38 | enum sh_dmae_desc_status { | ||
| 39 | DESC_IDLE, | ||
| 40 | DESC_PREPARED, | ||
| 41 | DESC_SUBMITTED, | ||
| 42 | DESC_COMPLETED, /* completed, have to call callback */ | ||
| 43 | DESC_WAITING, /* callback called, waiting for ack / re-submit */ | ||
| 44 | }; | ||
| 45 | 39 | ||
| 46 | #define NR_DESCS_PER_CHANNEL 32 | ||
| 47 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ | 40 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ |
| 48 | #define LOG2_DEFAULT_XFER_SIZE 2 | 41 | #define LOG2_DEFAULT_XFER_SIZE 2 |
| 42 | #define SH_DMA_SLAVE_NUMBER 256 | ||
| 43 | #define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1) | ||
| 49 | 44 | ||
| 50 | /* | 45 | /* |
| 51 | * Used for write-side mutual exclusion for the global device list, | 46 | * Used for write-side mutual exclusion for the global device list, |
@@ -54,18 +49,12 @@ enum sh_dmae_desc_status {
| 54 | static DEFINE_SPINLOCK(sh_dmae_lock); | 49 | static DEFINE_SPINLOCK(sh_dmae_lock); |
| 55 | static LIST_HEAD(sh_dmae_devices); | 50 | static LIST_HEAD(sh_dmae_devices); |
| 56 | 51 | ||
| 57 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ | ||
| 58 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; | ||
| 59 | |||
| 60 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); | ||
| 61 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); | ||
| 62 | |||
| 63 | static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) | 52 | static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) |
| 64 | { | 53 | { |
| 65 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | 54 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); |
| 66 | 55 | ||
| 67 | __raw_writel(data, shdev->chan_reg + | 56 | __raw_writel(data, shdev->chan_reg + |
| 68 | shdev->pdata->channel[sh_dc->id].chclr_offset); | 57 | shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset); |
| 69 | } | 58 | } |
| 70 | 59 | ||
| 71 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 60 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
@@ -155,11 +144,11 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
| 155 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | 144 | spin_unlock_irqrestore(&sh_dmae_lock, flags); |
| 156 | 145 | ||
| 157 | if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { | 146 | if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { |
| 158 | dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); | 147 | dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n"); |
| 159 | return -EIO; | 148 | return -EIO; |
| 160 | } | 149 | } |
| 161 | if (shdev->pdata->dmaor_init & ~dmaor) | 150 | if (shdev->pdata->dmaor_init & ~dmaor) |
| 162 | dev_warn(shdev->common.dev, | 151 | dev_warn(shdev->shdma_dev.dma_dev.dev, |
| 163 | "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", | 152 | "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", |
| 164 | dmaor, shdev->pdata->dmaor_init); | 153 | dmaor, shdev->pdata->dmaor_init); |
| 165 | return 0; | 154 | return 0; |
@@ -224,15 +213,6 @@ static void dmae_start(struct sh_dmae_chan *sh_chan)
| 224 | chcr_write(sh_chan, chcr & ~CHCR_TE); | 213 | chcr_write(sh_chan, chcr & ~CHCR_TE); |
| 225 | } | 214 | } |
| 226 | 215 | ||
| 227 | static void dmae_halt(struct sh_dmae_chan *sh_chan) | ||
| 228 | { | ||
| 229 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | ||
| 230 | u32 chcr = chcr_read(sh_chan); | ||
| 231 | |||
| 232 | chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); | ||
| 233 | chcr_write(sh_chan, chcr); | ||
| 234 | } | ||
| 235 | |||
| 236 | static void dmae_init(struct sh_dmae_chan *sh_chan) | 216 | static void dmae_init(struct sh_dmae_chan *sh_chan) |
| 237 | { | 217 | { |
| 238 | /* | 218 | /* |
@@ -261,7 +241,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
| 261 | { | 241 | { |
| 262 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | 242 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
| 263 | struct sh_dmae_pdata *pdata = shdev->pdata; | 243 | struct sh_dmae_pdata *pdata = shdev->pdata; |
| 264 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; | 244 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id]; |
| 265 | u16 __iomem *addr = shdev->dmars; | 245 | u16 __iomem *addr = shdev->dmars; |
| 266 | unsigned int shift = chan_pdata->dmars_bit; | 246 | unsigned int shift = chan_pdata->dmars_bit; |
| 267 | 247 | ||
@@ -282,706 +262,142 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
| 282 | return 0; | 262 | return 0; |
| 283 | } | 263 | } |
| 284 | 264 | ||
| 285 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | 265 | static void sh_dmae_start_xfer(struct shdma_chan *schan, |
| 266 | struct shdma_desc *sdesc) | ||
| 286 | { | 267 | { |
| 287 | struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; | 268 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, |
| 288 | struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); | 269 | shdma_chan); |
| 289 | struct sh_dmae_slave *param = tx->chan->private; | 270 | struct sh_dmae_desc *sh_desc = container_of(sdesc, |
| 290 | dma_async_tx_callback callback = tx->callback; | 271 | struct sh_dmae_desc, shdma_desc); |
| 291 | dma_cookie_t cookie; | 272 | dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n", |
| 292 | bool power_up; | 273 | sdesc->async_tx.cookie, sh_chan->shdma_chan.id, |
| 293 | 274 | sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar); | |
| 294 | spin_lock_irq(&sh_chan->desc_lock); | 275 | /* Get the ld start address from ld_queue */ |
| 295 | 276 | dmae_set_reg(sh_chan, &sh_desc->hw); | |
| 296 | if (list_empty(&sh_chan->ld_queue)) | 277 | dmae_start(sh_chan); |
| 297 | power_up = true; | ||
| 298 | else | ||
| 299 | power_up = false; | ||
| 300 | |||
| 301 | cookie = dma_cookie_assign(tx); | ||
| 302 | |||
| 303 | /* Mark all chunks of this descriptor as submitted, move to the queue */ | ||
| 304 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { | ||
| 305 | /* | ||
| 306 | * All chunks are on the global ld_free, so, we have to find | ||
| 307 | * the end of the chain ourselves | ||
| 308 | */ | ||
| 309 | if (chunk != desc && (chunk->mark == DESC_IDLE || | ||
| 310 | chunk->async_tx.cookie > 0 || | ||
| 311 | chunk->async_tx.cookie == -EBUSY || | ||
| 312 | &chunk->node == &sh_chan->ld_free)) | ||
| 313 | break; | ||
| 314 | chunk->mark = DESC_SUBMITTED; | ||
| 315 | /* Callback goes to the last chunk */ | ||
| 316 | chunk->async_tx.callback = NULL; | ||
| 317 | chunk->cookie = cookie; | ||
| 318 | list_move_tail(&chunk->node, &sh_chan->ld_queue); | ||
| 319 | last = chunk; | ||
| 320 | } | ||
| 321 | |||
| 322 | last->async_tx.callback = callback; | ||
| 323 | last->async_tx.callback_param = tx->callback_param; | ||
| 324 | |||
| 325 | dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n", | ||
| 326 | tx->cookie, &last->async_tx, sh_chan->id, | ||
| 327 | desc->hw.sar, desc->hw.tcr, desc->hw.dar); | ||
| 328 | |||
| 329 | if (power_up) { | ||
| 330 | sh_chan->pm_state = DMAE_PM_BUSY; | ||
| 331 | |||
| 332 | pm_runtime_get(sh_chan->dev); | ||
| 333 | |||
| 334 | spin_unlock_irq(&sh_chan->desc_lock); | ||
| 335 | |||
| 336 | pm_runtime_barrier(sh_chan->dev); | ||
| 337 | |||
| 338 | spin_lock_irq(&sh_chan->desc_lock); | ||
| 339 | |||
| 340 | /* Have we been reset, while waiting? */ | ||
| 341 | if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) { | ||
| 342 | dev_dbg(sh_chan->dev, "Bring up channel %d\n", | ||
| 343 | sh_chan->id); | ||
| 344 | if (param) { | ||
| 345 | const struct sh_dmae_slave_config *cfg = | ||
| 346 | param->config; | ||
| 347 | |||
| 348 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
| 349 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
| 350 | } else { | ||
| 351 | dmae_init(sh_chan); | ||
| 352 | } | ||
| 353 | |||
| 354 | if (sh_chan->pm_state == DMAE_PM_PENDING) | ||
| 355 | sh_chan_xfer_ld_queue(sh_chan); | ||
| 356 | sh_chan->pm_state = DMAE_PM_ESTABLISHED; | ||
| 357 | } | ||
| 358 | } else { | ||
| 359 | sh_chan->pm_state = DMAE_PM_PENDING; | ||
| 360 | } | ||
| 361 | |||
| 362 | spin_unlock_irq(&sh_chan->desc_lock); | ||
| 363 | |||
| 364 | return cookie; | ||
| 365 | } | 278 | } |
| 366 | 279 | ||
| 367 | /* Called with desc_lock held */ | 280 | static bool sh_dmae_channel_busy(struct shdma_chan *schan) |
| 368 | static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) | ||
| 369 | { | 281 | { |
| 370 | struct sh_desc *desc; | 282 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, |
| 371 | 283 | shdma_chan); | |
| 372 | list_for_each_entry(desc, &sh_chan->ld_free, node) | 284 | return dmae_is_busy(sh_chan); |
| 373 | if (desc->mark != DESC_PREPARED) { | ||
| 374 | BUG_ON(desc->mark != DESC_IDLE); | ||
| 375 | list_del(&desc->node); | ||
| 376 | return desc; | ||
| 377 | } | ||
| 378 | |||
| 379 | return NULL; | ||
| 380 | } | 285 | } |
| 381 | 286 | ||
| 382 | static const struct sh_dmae_slave_config *sh_dmae_find_slave( | 287 | static void sh_dmae_setup_xfer(struct shdma_chan *schan, |
| 383 | struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param) | 288 | struct shdma_slave *sslave) |
| 384 | { | 289 | { |
| 385 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); | 290 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, |
| 386 | struct sh_dmae_pdata *pdata = shdev->pdata; | 291 | shdma_chan); |
| 387 | int i; | ||
| 388 | 292 | ||
| 389 | if (param->slave_id >= SH_DMA_SLAVE_NUMBER) | 293 | if (sslave) { |
| 390 | return NULL; | 294 | struct sh_dmae_slave *slave = container_of(sslave, |
| 391 | 295 | struct sh_dmae_slave, shdma_slave); | |
| 392 | for (i = 0; i < pdata->slave_num; i++) | 296 | const struct sh_dmae_slave_config *cfg = |
| 393 | if (pdata->slave[i].slave_id == param->slave_id) | 297 | slave->config; |
| 394 | return pdata->slave + i; | ||
| 395 | |||
| 396 | return NULL; | ||
| 397 | } | ||
| 398 | |||
| 399 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | ||
| 400 | { | ||
| 401 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
| 402 | struct sh_desc *desc; | ||
| 403 | struct sh_dmae_slave *param = chan->private; | ||
| 404 | int ret; | ||
| 405 | 298 | ||
| 406 | /* | 299 | dmae_set_dmars(sh_chan, cfg->mid_rid); |
| 407 | * This relies on the guarantee from dmaengine that alloc_chan_resources | 300 | dmae_set_chcr(sh_chan, cfg->chcr); |
| 408 | * never runs concurrently with itself or free_chan_resources. | ||
| 409 | */ | ||
| 410 | if (param) { | ||
| 411 | const struct sh_dmae_slave_config *cfg; | ||
| 412 | |||
| 413 | cfg = sh_dmae_find_slave(sh_chan, param); | ||
| 414 | if (!cfg) { | ||
| 415 | ret = -EINVAL; | ||
| 416 | goto efindslave; | ||
| 417 | } | ||
| 418 | |||
| 419 | if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) { | ||
| 420 | ret = -EBUSY; | ||
| 421 | goto etestused; | ||
| 422 | } | ||
| 423 | |||
| 424 | param->config = cfg; | ||
| 425 | } | ||
| 426 | |||
| 427 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { | ||
| 428 | desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); | ||
| 429 | if (!desc) | ||
| 430 | break; | ||
| 431 | dma_async_tx_descriptor_init(&desc->async_tx, | ||
| 432 | &sh_chan->common); | ||
| 433 | desc->async_tx.tx_submit = sh_dmae_tx_submit; | ||
| 434 | desc->mark = DESC_IDLE; | ||
| 435 | |||
| 436 | list_add(&desc->node, &sh_chan->ld_free); | ||
| 437 | sh_chan->descs_allocated++; | ||
| 438 | } | ||
| 439 | |||
| 440 | if (!sh_chan->descs_allocated) { | ||
| 441 | ret = -ENOMEM; | ||
| 442 | goto edescalloc; | ||
| 443 | } | ||
| 444 | |||
| 445 | return sh_chan->descs_allocated; | ||
| 446 | |||
| 447 | edescalloc: | ||
| 448 | if (param) | ||
| 449 | clear_bit(param->slave_id, sh_dmae_slave_used); | ||
| 450 | etestused: | ||
| 451 | efindslave: | ||
| 452 | chan->private = NULL; | ||
| 453 | return ret; | ||
| 454 | } | ||
| 455 | |||
| 456 | /* | ||
| 457 | * sh_dma_free_chan_resources - Free all resources of the channel. | ||
| 458 | */ | ||
| 459 | static void sh_dmae_free_chan_resources(struct dma_chan *chan) | ||
| 460 | { | ||
| 461 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
| 462 | struct sh_desc *desc, *_desc; | ||
| 463 | LIST_HEAD(list); | ||
| 464 | |||
| 465 | /* Protect against ISR */ | ||
| 466 | spin_lock_irq(&sh_chan->desc_lock); | ||
| 467 | dmae_halt(sh_chan); | ||
| 468 | spin_unlock_irq(&sh_chan->desc_lock); | ||
| 469 | |||
| 470 | /* Now no new interrupts will occur */ | ||
| 471 | |||
| 472 | /* Prepared and not submitted descriptors can still be on the queue */ | ||
| 473 | if (!list_empty(&sh_chan->ld_queue)) | ||
| 474 | sh_dmae_chan_ld_cleanup(sh_chan, true); | ||
| 475 | |||
| 476 | if (chan->private) { | ||
| 477 | /* The caller is holding dma_list_mutex */ | ||
| 478 | struct sh_dmae_slave *param = chan->private; | ||
| 479 | clear_bit(param->slave_id, sh_dmae_slave_used); | ||
| 480 | chan->private = NULL; | ||
| 481 | } | ||
| 482 | |||
| 483 | spin_lock_irq(&sh_chan->desc_lock); | ||
| 484 | |||
| 485 | list_splice_init(&sh_chan->ld_free, &list); | ||
| 486 | sh_chan->descs_allocated = 0; | ||
| 487 | |||
| 488 | spin_unlock_irq(&sh_chan->desc_lock); | ||
| 489 | |||
| 490 | list_for_each_entry_safe(desc, _desc, &list, node) | ||
| 491 | kfree(desc); | ||
| 492 | } | ||
| 493 | |||
| 494 | /** | ||
| 495 | * sh_dmae_add_desc - get, set up and return one transfer descriptor | ||
| 496 | * @sh_chan: DMA channel | ||
| 497 | * @flags: DMA transfer flags | ||
| 498 | * @dest: destination DMA address, incremented when direction equals | ||
| 499 | * DMA_DEV_TO_MEM | ||
| 500 | * @src: source DMA address, incremented when direction equals | ||
| 501 | * DMA_MEM_TO_DEV | ||
| 502 | * @len: DMA transfer length | ||
| 503 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY | ||
| 504 | * @direction: needed for slave DMA to decide which address to keep constant, | ||
| 505 | * equals DMA_MEM_TO_MEM for MEMCPY | ||
| 506 | * Returns 0 or an error | ||
| 507 | * Locks: called with desc_lock held | ||
| 508 | */ | ||
| 509 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | ||
| 510 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, | ||
| 511 | struct sh_desc **first, enum dma_transfer_direction direction) | ||
| 512 | { | ||
| 513 | struct sh_desc *new; | ||
| 514 | size_t copy_size; | ||
| 515 | |||
| 516 | if (!*len) | ||
| 517 | return NULL; | ||
| 518 | |||
| 519 | /* Allocate the link descriptor from the free list */ | ||
| 520 | new = sh_dmae_get_desc(sh_chan); | ||
| 521 | if (!new) { | ||
| 522 | dev_err(sh_chan->dev, "No free link descriptor available\n"); | ||
| 523 | return NULL; | ||
| 524 | } | ||
| 525 | |||
| 526 | copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); | ||
| 527 | |||
| 528 | new->hw.sar = *src; | ||
| 529 | new->hw.dar = *dest; | ||
| 530 | new->hw.tcr = copy_size; | ||
| 531 | |||
| 532 | if (!*first) { | ||
| 533 | /* First desc */ | ||
| 534 | new->async_tx.cookie = -EBUSY; | ||
| 535 | *first = new; | ||
| 536 | } else { | 301 | } else { |
| 537 | /* Other desc - invisible to the user */ | 302 | dmae_init(sh_chan); |
| 538 | new->async_tx.cookie = -EINVAL; | ||
| 539 | } | 303 | } |
| 540 | |||
| 541 | dev_dbg(sh_chan->dev, | ||
| 542 | "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", | ||
| 543 | copy_size, *len, *src, *dest, &new->async_tx, | ||
| 544 | new->async_tx.cookie, sh_chan->xmit_shift); | ||
| 545 | |||
| 546 | new->mark = DESC_PREPARED; | ||
| 547 | new->async_tx.flags = flags; | ||
| 548 | new->direction = direction; | ||
| 549 | |||
| 550 | *len -= copy_size; | ||
| 551 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) | ||
| 552 | *src += copy_size; | ||
| 553 | if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) | ||
| 554 | *dest += copy_size; | ||
| 555 | |||
| 556 | return new; | ||
| 557 | } | 304 | } |
| 558 | 305 | ||
| 559 | /* | 306 | static const struct sh_dmae_slave_config *dmae_find_slave( |
| 560 | * sh_dmae_prep_sg - prepare transfer descriptors from an SG list | 307 | struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *slave) |
| 561 | * | ||
| 562 | * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also | ||
| 563 | * converted to scatter-gather to guarantee consistent locking and a correct | ||
| 564 | * list manipulation. For slave DMA direction carries the usual meaning, and, | ||
| 565 | * logically, the SG list is RAM and the addr variable contains slave address, | ||
| 566 | * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM | ||
| 567 | * and the SG list contains only one element and points at the source buffer. | ||
| 568 | */ | ||
| 569 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, | ||
| 570 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | ||
| 571 | enum dma_transfer_direction direction, unsigned long flags) | ||
| 572 | { | 308 | { |
| 573 | struct scatterlist *sg; | 309 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
| 574 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; | 310 | struct sh_dmae_pdata *pdata = shdev->pdata; |
| 575 | LIST_HEAD(tx_list); | 311 | const struct sh_dmae_slave_config *cfg; |
| 576 | int chunks = 0; | ||
| 577 | unsigned long irq_flags; | ||
| 578 | int i; | 312 | int i; |
| 579 | 313 | ||
| 580 | if (!sg_len) | 314 | if (slave->shdma_slave.slave_id >= SH_DMA_SLAVE_NUMBER) |
| 581 | return NULL; | 315 | return NULL; |
| 582 | 316 | ||
| 583 | for_each_sg(sgl, sg, sg_len, i) | 317 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) |
| 584 | chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / | 318 | if (cfg->slave_id == slave->shdma_slave.slave_id) |
| 585 | (SH_DMA_TCR_MAX + 1); | 319 | return cfg; |
| 586 | |||
| 587 | /* Have to lock the whole loop to protect against concurrent release */ | ||
| 588 | spin_lock_irqsave(&sh_chan->desc_lock, irq_flags); | ||
| 589 | |||
| 590 | /* | ||
| 591 | * Chaining: | ||
| 592 | * first descriptor is what user is dealing with in all API calls, its | ||
| 593 | * cookie is at first set to -EBUSY, at tx-submit to a positive | ||
| 594 | * number | ||
| 595 | * if more than one chunk is needed further chunks have cookie = -EINVAL | ||
| 596 | * the last chunk, if not equal to the first, has cookie = -ENOSPC | ||
| 597 | * all chunks are linked onto the tx_list head with their .node heads | ||
| 598 | * only during this function, then they are immediately spliced | ||
| 599 | * back onto the free list in form of a chain | ||
| 600 | */ | ||
| 601 | for_each_sg(sgl, sg, sg_len, i) { | ||
| 602 | dma_addr_t sg_addr = sg_dma_address(sg); | ||
| 603 | size_t len = sg_dma_len(sg); | ||
| 604 | |||
| 605 | if (!len) | ||
| 606 | goto err_get_desc; | ||
| 607 | |||
| 608 | do { | ||
| 609 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", | ||
| 610 | i, sg, len, (unsigned long long)sg_addr); | ||
| 611 | |||
| 612 | if (direction == DMA_DEV_TO_MEM) | ||
| 613 | new = sh_dmae_add_desc(sh_chan, flags, | ||
| 614 | &sg_addr, addr, &len, &first, | ||
| 615 | direction); | ||
| 616 | else | ||
| 617 | new = sh_dmae_add_desc(sh_chan, flags, | ||
| 618 | addr, &sg_addr, &len, &first, | ||
| 619 | direction); | ||
| 620 | if (!new) | ||
| 621 | goto err_get_desc; | ||
| 622 | |||
| 623 | new->chunks = chunks--; | ||
| 624 | list_add_tail(&new->node, &tx_list); | ||
| 625 | } while (len); | ||
| 626 | } | ||
| 627 | |||
| 628 | if (new != first) | ||
| 629 | new->async_tx.cookie = -ENOSPC; | ||
| 630 | |||
| 631 | /* Put them back on the free list, so, they don't get lost */ | ||
| 632 | list_splice_tail(&tx_list, &sh_chan->ld_free); | ||
| 633 | |||
| 634 | spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags); | ||
| 635 | |||
| 636 | return &first->async_tx; | ||
| 637 | |||
| 638 | err_get_desc: | ||
| 639 | list_for_each_entry(new, &tx_list, node) | ||
| 640 | new->mark = DESC_IDLE; | ||
| 641 | list_splice(&tx_list, &sh_chan->ld_free); | ||
| 642 | |||
| 643 | spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags); | ||
| 644 | 320 | ||
| 645 | return NULL; | 321 | return NULL; |
| 646 | } | 322 | } |
| 647 | 323 | ||
| 648 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | 324 | static int sh_dmae_set_slave(struct shdma_chan *schan, |
| 649 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | 325 | struct shdma_slave *sslave) |
| 650 | size_t len, unsigned long flags) | ||
| 651 | { | 326 | { |
| 652 | struct sh_dmae_chan *sh_chan; | 327 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, |
| 653 | struct scatterlist sg; | 328 | shdma_chan); |
| 654 | 329 | struct sh_dmae_slave *slave = container_of(sslave, struct sh_dmae_slave, | |
| 655 | if (!chan || !len) | 330 | shdma_slave); |
| 656 | return NULL; | 331 | const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave); |
| 657 | 332 | if (!cfg) | |
| 658 | sh_chan = to_sh_chan(chan); | 333 | return -ENODEV; |
| 659 | |||
| 660 | sg_init_table(&sg, 1); | ||
| 661 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, | ||
| 662 | offset_in_page(dma_src)); | ||
| 663 | sg_dma_address(&sg) = dma_src; | ||
| 664 | sg_dma_len(&sg) = len; | ||
| 665 | |||
| 666 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, | ||
| 667 | flags); | ||
| 668 | } | ||
| 669 | |||
| 670 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | ||
| 671 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | ||
| 672 | enum dma_transfer_direction direction, unsigned long flags, | ||
| 673 | void *context) | ||
| 674 | { | ||
| 675 | struct sh_dmae_slave *param; | ||
| 676 | struct sh_dmae_chan *sh_chan; | ||
| 677 | dma_addr_t slave_addr; | ||
| 678 | |||
| 679 | if (!chan) | ||
| 680 | return NULL; | ||
| 681 | |||
| 682 | sh_chan = to_sh_chan(chan); | ||
| 683 | param = chan->private; | ||
| 684 | |||
| 685 | /* Someone calling slave DMA on a public channel? */ | ||
| 686 | if (!param || !sg_len) { | ||
| 687 | dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n", | ||
| 688 | __func__, param, sg_len, param ? param->slave_id : -1); | ||
| 689 | return NULL; | ||
| 690 | } | ||
| 691 | |||
| 692 | slave_addr = param->config->addr; | ||
| 693 | |||
| 694 | /* | ||
| 695 | * if (param != NULL), this is a successfully requested slave channel, | ||
| 696 | * therefore param->config != NULL too. | ||
| 697 | */ | ||
| 698 | return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr, | ||
| 699 | direction, flags); | ||
| 700 | } | ||
| 701 | |||
| 702 | static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
| 703 | unsigned long arg) | ||
| 704 | { | ||
| 705 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
| 706 | unsigned long flags; | ||
| 707 | |||
| 708 | /* Only supports DMA_TERMINATE_ALL */ | ||
| 709 | if (cmd != DMA_TERMINATE_ALL) | ||
| 710 | return -ENXIO; | ||
| 711 | |||
| 712 | if (!chan) | ||
| 713 | return -EINVAL; | ||
| 714 | |||
| 715 | spin_lock_irqsave(&sh_chan->desc_lock, flags); | ||
| 716 | dmae_halt(sh_chan); | ||
| 717 | |||
| 718 | if (!list_empty(&sh_chan->ld_queue)) { | ||
| 719 | /* Record partial transfer */ | ||
| 720 | struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, | ||
| 721 | struct sh_desc, node); | ||
| 722 | desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << | ||
| 723 | sh_chan->xmit_shift; | ||
| 724 | } | ||
| 725 | spin_unlock_irqrestore(&sh_chan->desc_lock, flags); | ||
| 726 | 334 | ||
| 727 | sh_dmae_chan_ld_cleanup(sh_chan, true); | 335 | slave->config = cfg; |
| 728 | 336 | ||
| 729 | return 0; | 337 | return 0; |
| 730 | } | 338 | } |
| 731 | 339 | ||
| 732 | static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | 340 | static void dmae_halt(struct sh_dmae_chan *sh_chan) |
| 733 | { | 341 | { |
| 734 | struct sh_desc *desc, *_desc; | 342 | struct sh_dmae_device *shdev = to_sh_dev(sh_chan); |
| 735 | /* Is the "exposed" head of a chain acked? */ | 343 | u32 chcr = chcr_read(sh_chan); |
| 736 | bool head_acked = false; | ||
| 737 | dma_cookie_t cookie = 0; | ||
| 738 | dma_async_tx_callback callback = NULL; | ||
| 739 | void *param = NULL; | ||
| 740 | unsigned long flags; | ||
| 741 | |||
| 742 | spin_lock_irqsave(&sh_chan->desc_lock, flags); | ||
| 743 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { | ||
| 744 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
| 745 | |||
| 746 | BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); | ||
| 747 | BUG_ON(desc->mark != DESC_SUBMITTED && | ||
| 748 | desc->mark != DESC_COMPLETED && | ||
| 749 | desc->mark != DESC_WAITING); | ||
| 750 | |||
| 751 | /* | ||
| 752 | * queue is ordered, and we use this loop to (1) clean up all | ||
| 753 | * completed descriptors, and to (2) update descriptor flags of | ||
| 754 | * any chunks in a (partially) completed chain | ||
| 755 | */ | ||
| 756 | if (!all && desc->mark == DESC_SUBMITTED && | ||
| 757 | desc->cookie != cookie) | ||
| 758 | break; | ||
| 759 | |||
| 760 | if (tx->cookie > 0) | ||
| 761 | cookie = tx->cookie; | ||
| 762 | |||
| 763 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { | ||
| 764 | if (sh_chan->common.completed_cookie != desc->cookie - 1) | ||
| 765 | dev_dbg(sh_chan->dev, | ||
| 766 | "Completing cookie %d, expected %d\n", | ||
| 767 | desc->cookie, | ||
| 768 | sh_chan->common.completed_cookie + 1); | ||
| 769 | sh_chan->common.completed_cookie = desc->cookie; | ||
| 770 | } | ||
| 771 | |||
| 772 | /* Call callback on the last chunk */ | ||
| 773 | if (desc->mark == DESC_COMPLETED && tx->callback) { | ||
| 774 | desc->mark = DESC_WAITING; | ||
| 775 | callback = tx->callback; | ||
| 776 | param = tx->callback_param; | ||
| 777 | dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n", | ||
| 778 | tx->cookie, tx, sh_chan->id); | ||
| 779 | BUG_ON(desc->chunks != 1); | ||
| 780 | break; | ||
| 781 | } | ||
| 782 | |||
| 783 | if (tx->cookie > 0 || tx->cookie == -EBUSY) { | ||
| 784 | if (desc->mark == DESC_COMPLETED) { | ||
| 785 | BUG_ON(tx->cookie < 0); | ||
| 786 | desc->mark = DESC_WAITING; | ||
| 787 | } | ||
| 788 | head_acked = async_tx_test_ack(tx); | ||
| 789 | } else { | ||
| 790 | switch (desc->mark) { | ||
| 791 | case DESC_COMPLETED: | ||
| 792 | desc->mark = DESC_WAITING; | ||
| 793 | /* Fall through */ | ||
| 794 | case DESC_WAITING: | ||
| 795 | if (head_acked) | ||
| 796 | async_tx_ack(&desc->async_tx); | ||
| 797 | } | ||
| 798 | } | ||
| 799 | |||
| 800 | dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n", | ||
| 801 | tx, tx->cookie); | ||
| 802 | |||
| 803 | if (((desc->mark == DESC_COMPLETED || | ||
| 804 | desc->mark == DESC_WAITING) && | ||
| 805 | async_tx_test_ack(&desc->async_tx)) || all) { | ||
| 806 | /* Remove from ld_queue list */ | ||
| 807 | desc->mark = DESC_IDLE; | ||
| 808 | |||
| 809 | list_move(&desc->node, &sh_chan->ld_free); | ||
| 810 | |||
| 811 | if (list_empty(&sh_chan->ld_queue)) { | ||
| 812 | dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id); | ||
| 813 | pm_runtime_put(sh_chan->dev); | ||
| 814 | } | ||
| 815 | } | ||
| 816 | } | ||
| 817 | |||
| 818 | if (all && !callback) | ||
| 819 | /* | ||
| 820 | * Terminating and the loop completed normally: forgive | ||
| 821 | * uncompleted cookies | ||
| 822 | */ | ||
| 823 | sh_chan->common.completed_cookie = sh_chan->common.cookie; | ||
| 824 | |||
| 825 | spin_unlock_irqrestore(&sh_chan->desc_lock, flags); | ||
| 826 | |||
| 827 | if (callback) | ||
| 828 | callback(param); | ||
| 829 | 344 | ||
| 830 | return callback; | 345 | chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); |
| 346 | chcr_write(sh_chan, chcr); | ||
| 831 | } | 347 | } |
| 832 | 348 | ||
| 833 | /* | 349 | static int sh_dmae_desc_setup(struct shdma_chan *schan, |
| 834 | * sh_chan_ld_cleanup - Clean up link descriptors | 350 | struct shdma_desc *sdesc, |
| 835 | * | 351 | dma_addr_t src, dma_addr_t dst, size_t *len) |
| 836 | * This function cleans up the ld_queue of DMA channel. | ||
| 837 | */ | ||
| 838 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | ||
| 839 | { | 352 | { |
| 840 | while (__ld_cleanup(sh_chan, all)) | 353 | struct sh_dmae_desc *sh_desc = container_of(sdesc, |
| 841 | ; | 354 | struct sh_dmae_desc, shdma_desc); |
| 842 | } | ||
| 843 | 355 | ||
| 844 | /* Called under spin_lock_irq(&sh_chan->desc_lock) */ | 356 | if (*len > schan->max_xfer_len) |
| 845 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | 357 | *len = schan->max_xfer_len; |
| 846 | { | ||
| 847 | struct sh_desc *desc; | ||
| 848 | 358 | ||
| 849 | /* DMA work check */ | 359 | sh_desc->hw.sar = src; |
| 850 | if (dmae_is_busy(sh_chan)) | 360 | sh_desc->hw.dar = dst; |
| 851 | return; | 361 | sh_desc->hw.tcr = *len; |
| 852 | |||
| 853 | /* Find the first not transferred descriptor */ | ||
| 854 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | ||
| 855 | if (desc->mark == DESC_SUBMITTED) { | ||
| 856 | dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", | ||
| 857 | desc->async_tx.cookie, sh_chan->id, | ||
| 858 | desc->hw.tcr, desc->hw.sar, desc->hw.dar); | ||
| 859 | /* Get the ld start address from ld_queue */ | ||
| 860 | dmae_set_reg(sh_chan, &desc->hw); | ||
| 861 | dmae_start(sh_chan); | ||
| 862 | break; | ||
| 863 | } | ||
| 864 | } | ||
| 865 | 362 | ||
| 866 | static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) | 363 | return 0; |
| 867 | { | ||
| 868 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
| 869 | |||
| 870 | spin_lock_irq(&sh_chan->desc_lock); | ||
| 871 | if (sh_chan->pm_state == DMAE_PM_ESTABLISHED) | ||
| 872 | sh_chan_xfer_ld_queue(sh_chan); | ||
| 873 | else | ||
| 874 | sh_chan->pm_state = DMAE_PM_PENDING; | ||
| 875 | spin_unlock_irq(&sh_chan->desc_lock); | ||
| 876 | } | 364 | } |
| 877 | 365 | ||
| 878 | static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, | 366 | static void sh_dmae_halt(struct shdma_chan *schan) |
| 879 | dma_cookie_t cookie, | ||
| 880 | struct dma_tx_state *txstate) | ||
| 881 | { | 367 | { |
| 882 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 368 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, |
| 883 | enum dma_status status; | 369 | shdma_chan); |
| 884 | unsigned long flags; | 370 | dmae_halt(sh_chan); |
| 885 | |||
| 886 | sh_dmae_chan_ld_cleanup(sh_chan, false); | ||
| 887 | |||
| 888 | spin_lock_irqsave(&sh_chan->desc_lock, flags); | ||
| 889 | |||
| 890 | status = dma_cookie_status(chan, cookie, txstate); | ||
| 891 | |||
| 892 | /* | ||
| 893 | * If we don't find cookie on the queue, it has been aborted and we have | ||
| 894 | * to report error | ||
| 895 | */ | ||
| 896 | if (status != DMA_SUCCESS) { | ||
| 897 | struct sh_desc *desc; | ||
| 898 | status = DMA_ERROR; | ||
| 899 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | ||
| 900 | if (desc->cookie == cookie) { | ||
| 901 | status = DMA_IN_PROGRESS; | ||
| 902 | break; | ||
| 903 | } | ||
| 904 | } | ||
| 905 | |||
| 906 | spin_unlock_irqrestore(&sh_chan->desc_lock, flags); | ||
| 907 | |||
| 908 | return status; | ||
| 909 | } | 371 | } |
| 910 | 372 | ||
| 911 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) | 373 | static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) |
| 912 | { | 374 | { |
| 913 | irqreturn_t ret = IRQ_NONE; | 375 | struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, |
| 914 | struct sh_dmae_chan *sh_chan = data; | 376 | shdma_chan); |
| 915 | u32 chcr; | ||
| 916 | |||
| 917 | spin_lock(&sh_chan->desc_lock); | ||
| 918 | |||
| 919 | chcr = chcr_read(sh_chan); | ||
| 920 | 377 | ||
| 921 | if (chcr & CHCR_TE) { | 378 | if (!(chcr_read(sh_chan) & CHCR_TE)) |
| 922 | /* DMA stop */ | 379 | return false; |
| 923 | dmae_halt(sh_chan); | ||
| 924 | |||
| 925 | ret = IRQ_HANDLED; | ||
| 926 | tasklet_schedule(&sh_chan->tasklet); | ||
| 927 | } | ||
| 928 | 380 | ||
| 929 | spin_unlock(&sh_chan->desc_lock); | 381 | /* DMA stop */ |
| 382 | dmae_halt(sh_chan); | ||
| 930 | 383 | ||
| 931 | return ret; | 384 | return true; |
| 932 | } | 385 | } |
| 933 | 386 | ||
| 934 | /* Called from error IRQ or NMI */ | 387 | /* Called from error IRQ or NMI */ |
| 935 | static bool sh_dmae_reset(struct sh_dmae_device *shdev) | 388 | static bool sh_dmae_reset(struct sh_dmae_device *shdev) |
| 936 | { | 389 | { |
| 937 | unsigned int handled = 0; | 390 | bool ret; |
| 938 | int i; | ||
| 939 | 391 | ||
| 940 | /* halt the dma controller */ | 392 | /* halt the dma controller */ |
| 941 | sh_dmae_ctl_stop(shdev); | 393 | sh_dmae_ctl_stop(shdev); |
| 942 | 394 | ||
| 943 | /* We cannot detect, which channel caused the error, have to reset all */ | 395 | /* We cannot detect, which channel caused the error, have to reset all */ |
| 944 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { | 396 | ret = shdma_reset(&shdev->shdma_dev); |
| 945 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
| 946 | struct sh_desc *desc; | ||
| 947 | LIST_HEAD(dl); | ||
| 948 | |||
| 949 | if (!sh_chan) | ||
| 950 | continue; | ||
| 951 | |||
| 952 | spin_lock(&sh_chan->desc_lock); | ||
| 953 | |||
| 954 | /* Stop the channel */ | ||
| 955 | dmae_halt(sh_chan); | ||
| 956 | |||
| 957 | list_splice_init(&sh_chan->ld_queue, &dl); | ||
| 958 | |||
| 959 | if (!list_empty(&dl)) { | ||
| 960 | dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id); | ||
| 961 | pm_runtime_put(sh_chan->dev); | ||
| 962 | } | ||
| 963 | sh_chan->pm_state = DMAE_PM_ESTABLISHED; | ||
| 964 | |||
| 965 | spin_unlock(&sh_chan->desc_lock); | ||
| 966 | |||
| 967 | /* Complete all */ | ||
| 968 | list_for_each_entry(desc, &dl, node) { | ||
| 969 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
| 970 | desc->mark = DESC_IDLE; | ||
| 971 | if (tx->callback) | ||
| 972 | tx->callback(tx->callback_param); | ||
| 973 | } | ||
| 974 | |||
| 975 | spin_lock(&sh_chan->desc_lock); | ||
| 976 | list_splice(&dl, &sh_chan->ld_free); | ||
| 977 | spin_unlock(&sh_chan->desc_lock); | ||
| 978 | |||
| 979 | handled++; | ||
| 980 | } | ||
| 981 | 397 | ||
| 982 | sh_dmae_rst(shdev); | 398 | sh_dmae_rst(shdev); |
| 983 | 399 | ||
| 984 | return !!handled; | 400 | return ret; |
| 985 | } | 401 | } |
| 986 | 402 | ||
| 987 | static irqreturn_t sh_dmae_err(int irq, void *data) | 403 | static irqreturn_t sh_dmae_err(int irq, void *data) |
@@ -991,35 +407,24 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
| 991 | if (!(dmaor_read(shdev) & DMAOR_AE)) | 407 | if (!(dmaor_read(shdev) & DMAOR_AE)) |
| 992 | return IRQ_NONE; | 408 | return IRQ_NONE; |
| 993 | 409 | ||
| 994 | sh_dmae_reset(data); | 410 | sh_dmae_reset(shdev); |
| 995 | return IRQ_HANDLED; | 411 | return IRQ_HANDLED; |
| 996 | } | 412 | } |
| 997 | 413 | ||
| 998 | static void dmae_do_tasklet(unsigned long data) | 414 | static bool sh_dmae_desc_completed(struct shdma_chan *schan, |
| 415 | struct shdma_desc *sdesc) | ||
| 999 | { | 416 | { |
| 1000 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | 417 | struct sh_dmae_chan *sh_chan = container_of(schan, |
| 1001 | struct sh_desc *desc; | 418 | struct sh_dmae_chan, shdma_chan); |
| 419 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | ||
| 420 | struct sh_dmae_desc, shdma_desc); | ||
| 1002 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | 421 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); |
| 1003 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); | 422 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); |
| 1004 | 423 | ||
| 1005 | spin_lock_irq(&sh_chan->desc_lock); | 424 | return (sdesc->direction == DMA_DEV_TO_MEM && |
| 1006 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | 425 | (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) || |
| 1007 | if (desc->mark == DESC_SUBMITTED && | 426 | (sdesc->direction != DMA_DEV_TO_MEM && |
| 1008 | ((desc->direction == DMA_DEV_TO_MEM && | 427 | (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf); |
| 1009 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || | ||
| 1010 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { | ||
| 1011 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", | ||
| 1012 | desc->async_tx.cookie, &desc->async_tx, | ||
| 1013 | desc->hw.dar); | ||
| 1014 | desc->mark = DESC_COMPLETED; | ||
| 1015 | break; | ||
| 1016 | } | ||
| 1017 | } | ||
| 1018 | /* Next desc */ | ||
| 1019 | sh_chan_xfer_ld_queue(sh_chan); | ||
| 1020 | spin_unlock_irq(&sh_chan->desc_lock); | ||
| 1021 | |||
| 1022 | sh_dmae_chan_ld_cleanup(sh_chan, false); | ||
| 1023 | } | 428 | } |
| 1024 | 429 | ||
| 1025 | static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) | 430 | static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) |
@@ -1073,97 +478,174 @@ static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
| 1073 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | 478 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, |
| 1074 | int irq, unsigned long flags) | 479 | int irq, unsigned long flags) |
| 1075 | { | 480 | { |
| 1076 | int err; | ||
| 1077 | const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; | 481 | const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; |
| 1078 | struct platform_device *pdev = to_platform_device(shdev->common.dev); | 482 | struct shdma_dev *sdev = &shdev->shdma_dev; |
| 1079 | struct sh_dmae_chan *new_sh_chan; | 483 | struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); |
| 484 | struct sh_dmae_chan *sh_chan; | ||
| 485 | struct shdma_chan *schan; | ||
| 486 | int err; | ||
| 1080 | 487 | ||
| 1081 | /* alloc channel */ | 488 | sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); |
| 1082 | new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); | 489 | if (!sh_chan) { |
| 1083 | if (!new_sh_chan) { | 490 | dev_err(sdev->dma_dev.dev, |
| 1084 | dev_err(shdev->common.dev, | ||
| 1085 | "No free memory for allocating dma channels!\n"); | 491 | "No free memory for allocating dma channels!\n"); |
| 1086 | return -ENOMEM; | 492 | return -ENOMEM; |
| 1087 | } | 493 | } |
| 1088 | 494 | ||
| 1089 | new_sh_chan->pm_state = DMAE_PM_ESTABLISHED; | 495 | schan = &sh_chan->shdma_chan; |
| 1090 | 496 | schan->max_xfer_len = SH_DMA_TCR_MAX + 1; | |
| 1091 | /* reference struct dma_device */ | ||
| 1092 | new_sh_chan->common.device = &shdev->common; | ||
| 1093 | dma_cookie_init(&new_sh_chan->common); | ||
| 1094 | 497 | ||
| 1095 | new_sh_chan->dev = shdev->common.dev; | 498 | shdma_chan_probe(sdev, schan, id); |
| 1096 | new_sh_chan->id = id; | ||
| 1097 | new_sh_chan->irq = irq; | ||
| 1098 | new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); | ||
| 1099 | 499 | ||
| 1100 | /* Init DMA tasklet */ | 500 | sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); |
| 1101 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, | ||
| 1102 | (unsigned long)new_sh_chan); | ||
| 1103 | |||
| 1104 | spin_lock_init(&new_sh_chan->desc_lock); | ||
| 1105 | |||
| 1106 | /* Init descripter manage list */ | ||
| 1107 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); | ||
| 1108 | INIT_LIST_HEAD(&new_sh_chan->ld_free); | ||
| 1109 | |||
| 1110 | /* Add the channel to DMA device channel list */ | ||
| 1111 | list_add_tail(&new_sh_chan->common.device_node, | ||
| 1112 | &shdev->common.channels); | ||
| 1113 | shdev->common.chancnt++; | ||
| 1114 | 501 | ||
| 502 | /* set up channel irq */ | ||
| 1115 | if (pdev->id >= 0) | 503 | if (pdev->id >= 0) |
| 1116 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | 504 | snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), |
| 1117 | "sh-dmae%d.%d", pdev->id, new_sh_chan->id); | 505 | "sh-dmae%d.%d", pdev->id, id); |
| 1118 | else | 506 | else |
| 1119 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | 507 | snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), |
| 1120 | "sh-dma%d", new_sh_chan->id); | 508 | "sh-dma%d", id); |
| 1121 | 509 | ||
| 1122 | /* set up channel irq */ | 510 | err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id); |
| 1123 | err = request_irq(irq, &sh_dmae_interrupt, flags, | ||
| 1124 | new_sh_chan->dev_id, new_sh_chan); | ||
| 1125 | if (err) { | 511 | if (err) { |
| 1126 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " | 512 | dev_err(sdev->dma_dev.dev, |
| 1127 | "with return %d\n", id, err); | 513 | "DMA channel %d request_irq error %d\n", |
| 514 | id, err); | ||
| 1128 | goto err_no_irq; | 515 | goto err_no_irq; |
| 1129 | } | 516 | } |
| 1130 | 517 | ||
| 1131 | shdev->chan[id] = new_sh_chan; | 518 | shdev->chan[id] = sh_chan; |
| 1132 | return 0; | 519 | return 0; |
| 1133 | 520 | ||
| 1134 | err_no_irq: | 521 | err_no_irq: |
| 1135 | /* remove from dmaengine device node */ | 522 | /* remove from dmaengine device node */ |
| 1136 | list_del(&new_sh_chan->common.device_node); | 523 | shdma_chan_remove(schan); |
| 1137 | kfree(new_sh_chan); | 524 | kfree(sh_chan); |
| 1138 | return err; | 525 | return err; |
| 1139 | } | 526 | } |
| 1140 | 527 | ||
| 1141 | static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | 528 | static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) |
| 1142 | { | 529 | { |
| 530 | struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; | ||
| 531 | struct shdma_chan *schan; | ||
| 1143 | int i; | 532 | int i; |
| 1144 | 533 | ||
| 1145 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { | 534 | shdma_for_each_chan(schan, &shdev->shdma_dev, i) { |
| 1146 | if (shdev->chan[i]) { | 535 | struct sh_dmae_chan *sh_chan = container_of(schan, |
| 1147 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 536 | struct sh_dmae_chan, shdma_chan); |
| 537 | BUG_ON(!schan); | ||
| 1148 | 538 | ||
| 1149 | free_irq(sh_chan->irq, sh_chan); | 539 | shdma_free_irq(&sh_chan->shdma_chan); |
| 1150 | 540 | ||
| 1151 | list_del(&sh_chan->common.device_node); | 541 | shdma_chan_remove(schan); |
| 1152 | kfree(sh_chan); | 542 | kfree(sh_chan); |
| 1153 | shdev->chan[i] = NULL; | 543 | } |
| 544 | dma_dev->chancnt = 0; | ||
| 545 | } | ||
| 546 | |||
| 547 | static void sh_dmae_shutdown(struct platform_device *pdev) | ||
| 548 | { | ||
| 549 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
| 550 | sh_dmae_ctl_stop(shdev); | ||
| 551 | } | ||
| 552 | |||
| 553 | static int sh_dmae_runtime_suspend(struct device *dev) | ||
| 554 | { | ||
| 555 | return 0; | ||
| 556 | } | ||
| 557 | |||
| 558 | static int sh_dmae_runtime_resume(struct device *dev) | ||
| 559 | { | ||
| 560 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
| 561 | |||
| 562 | return sh_dmae_rst(shdev); | ||
| 563 | } | ||
| 564 | |||
| 565 | #ifdef CONFIG_PM | ||
| 566 | static int sh_dmae_suspend(struct device *dev) | ||
| 567 | { | ||
| 568 | return 0; | ||
| 569 | } | ||
| 570 | |||
| 571 | static int sh_dmae_resume(struct device *dev) | ||
| 572 | { | ||
| 573 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
| 574 | int i, ret; | ||
| 575 | |||
| 576 | ret = sh_dmae_rst(shdev); | ||
| 577 | if (ret < 0) | ||
| 578 | dev_err(dev, "Failed to reset!\n"); | ||
| 579 | |||
| 580 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
| 581 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
| 582 | struct sh_dmae_slave *param = sh_chan->shdma_chan.dma_chan.private; | ||
| 583 | |||
| 584 | if (!sh_chan->shdma_chan.desc_num) | ||
| 585 | continue; | ||
| 586 | |||
| 587 | if (param) { | ||
| 588 | const struct sh_dmae_slave_config *cfg = param->config; | ||
| 589 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
| 590 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
| 591 | } else { | ||
| 592 | dmae_init(sh_chan); | ||
| 1154 | } | 593 | } |
| 1155 | } | 594 | } |
| 1156 | shdev->common.chancnt = 0; | 595 | |
| 596 | return 0; | ||
| 1157 | } | 597 | } |
| 598 | #else | ||
| 599 | #define sh_dmae_suspend NULL | ||
| 600 | #define sh_dmae_resume NULL | ||
| 601 | #endif | ||
| 1158 | 602 | ||
| 1159 | static int __init sh_dmae_probe(struct platform_device *pdev) | 603 | const struct dev_pm_ops sh_dmae_pm = { |
| 604 | .suspend = sh_dmae_suspend, | ||
| 605 | .resume = sh_dmae_resume, | ||
| 606 | .runtime_suspend = sh_dmae_runtime_suspend, | ||
| 607 | .runtime_resume = sh_dmae_runtime_resume, | ||
| 608 | }; | ||
| 609 | |||
| 610 | static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) | ||
| 611 | { | ||
| 612 | struct sh_dmae_slave *param = schan->dma_chan.private; | ||
| 613 | |||
| 614 | /* | ||
| 615 | * Implicit BUG_ON(!param) | ||
| 616 | * if (param != NULL), this is a successfully requested slave channel, | ||
| 617 | * therefore param->config != NULL too. | ||
| 618 | */ | ||
| 619 | return param->config->addr; | ||
| 620 | } | ||
| 621 | |||
| 622 | static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i) | ||
| 623 | { | ||
| 624 | return &((struct sh_dmae_desc *)buf)[i].shdma_desc; | ||
| 625 | } | ||
| 626 | |||
| 627 | static const struct shdma_ops sh_dmae_shdma_ops = { | ||
| 628 | .desc_completed = sh_dmae_desc_completed, | ||
| 629 | .halt_channel = sh_dmae_halt, | ||
| 630 | .channel_busy = sh_dmae_channel_busy, | ||
| 631 | .slave_addr = sh_dmae_slave_addr, | ||
| 632 | .desc_setup = sh_dmae_desc_setup, | ||
| 633 | .set_slave = sh_dmae_set_slave, | ||
| 634 | .setup_xfer = sh_dmae_setup_xfer, | ||
| 635 | .start_xfer = sh_dmae_start_xfer, | ||
| 636 | .embedded_desc = sh_dmae_embedded_desc, | ||
| 637 | .chan_irq = sh_dmae_chan_irq, | ||
| 638 | }; | ||
| 639 | |||
| 640 | static int __devinit sh_dmae_probe(struct platform_device *pdev) | ||
| 1160 | { | 641 | { |
| 1161 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; | 642 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; |
| 1162 | unsigned long irqflags = IRQF_DISABLED, | 643 | unsigned long irqflags = IRQF_DISABLED, |
| 1163 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; | 644 | chan_flag[SH_DMAE_MAX_CHANNELS] = {}; |
| 1164 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; | 645 | int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; |
| 1165 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; | 646 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; |
| 1166 | struct sh_dmae_device *shdev; | 647 | struct sh_dmae_device *shdev; |
| 648 | struct dma_device *dma_dev; | ||
| 1167 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | 649 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; |
| 1168 | 650 | ||
| 1169 | /* get platform data */ | 651 | /* get platform data */ |
@@ -1211,6 +693,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
| 1211 | goto ealloc; | 693 | goto ealloc; |
| 1212 | } | 694 | } |
| 1213 | 695 | ||
| 696 | dma_dev = &shdev->shdma_dev.dma_dev; | ||
| 697 | |||
| 1214 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); | 698 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); |
| 1215 | if (!shdev->chan_reg) | 699 | if (!shdev->chan_reg) |
| 1216 | goto emapchan; | 700 | goto emapchan; |
@@ -1220,8 +704,23 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
| 1220 | goto emapdmars; | 704 | goto emapdmars; |
| 1221 | } | 705 | } |
| 1222 | 706 | ||
| 707 | if (!pdata->slave_only) | ||
| 708 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
| 709 | if (pdata->slave && pdata->slave_num) | ||
| 710 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
| 711 | |||
| 712 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | ||
| 713 | dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; | ||
| 714 | |||
| 715 | shdev->shdma_dev.ops = &sh_dmae_shdma_ops; | ||
| 716 | shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc); | ||
| 717 | err = shdma_init(&pdev->dev, &shdev->shdma_dev, | ||
| 718 | pdata->channel_num); | ||
| 719 | if (err < 0) | ||
| 720 | goto eshdma; | ||
| 721 | |||
| 1223 | /* platform data */ | 722 | /* platform data */ |
| 1224 | shdev->pdata = pdata; | 723 | shdev->pdata = pdev->dev.platform_data; |
| 1225 | 724 | ||
| 1226 | if (pdata->chcr_offset) | 725 | if (pdata->chcr_offset) |
| 1227 | shdev->chcr_offset = pdata->chcr_offset; | 726 | shdev->chcr_offset = pdata->chcr_offset; |
@@ -1235,10 +734,10 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
| 1235 | 734 | ||
| 1236 | platform_set_drvdata(pdev, shdev); | 735 | platform_set_drvdata(pdev, shdev); |
| 1237 | 736 | ||
| 1238 | shdev->common.dev = &pdev->dev; | ||
| 1239 | |||
| 1240 | pm_runtime_enable(&pdev->dev); | 737 | pm_runtime_enable(&pdev->dev); |
| 1241 | pm_runtime_get_sync(&pdev->dev); | 738 | err = pm_runtime_get_sync(&pdev->dev); |
| 739 | if (err < 0) | ||
| 740 | dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); | ||
| 1242 | 741 | ||
| 1243 | spin_lock_irq(&sh_dmae_lock); | 742 | spin_lock_irq(&sh_dmae_lock); |
| 1244 | list_add_tail_rcu(&shdev->node, &sh_dmae_devices); | 743 | list_add_tail_rcu(&shdev->node, &sh_dmae_devices); |
@@ -1249,27 +748,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
| 1249 | if (err) | 748 | if (err) |
| 1250 | goto rst_err; | 749 | goto rst_err; |
| 1251 | 750 | ||
| 1252 | INIT_LIST_HEAD(&shdev->common.channels); | ||
| 1253 | |||
| 1254 | if (!pdata->slave_only) | ||
| 1255 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | ||
| 1256 | if (pdata->slave && pdata->slave_num) | ||
| 1257 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | ||
| 1258 | |||
| 1259 | shdev->common.device_alloc_chan_resources | ||
| 1260 | = sh_dmae_alloc_chan_resources; | ||
| 1261 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; | ||
| 1262 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; | ||
| 1263 | shdev->common.device_tx_status = sh_dmae_tx_status; | ||
| 1264 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; | ||
| 1265 | |||
| 1266 | /* Compulsory for DMA_SLAVE fields */ | ||
| 1267 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; | ||
| 1268 | shdev->common.device_control = sh_dmae_control; | ||
| 1269 | |||
| 1270 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | ||
| 1271 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; | ||
| 1272 | |||
| 1273 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 751 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) |
| 1274 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | 752 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); |
| 1275 | 753 | ||
@@ -1301,7 +779,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
| 1301 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { | 779 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { |
| 1302 | /* Special case - all multiplexed */ | 780 | /* Special case - all multiplexed */ |
| 1303 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { | 781 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { |
| 1304 | if (irq_cnt < SH_DMAC_MAX_CHANNELS) { | 782 | if (irq_cnt < SH_DMAE_MAX_CHANNELS) { |
| 1305 | chan_irq[irq_cnt] = chanirq_res->start; | 783 | chan_irq[irq_cnt] = chanirq_res->start; |
| 1306 | chan_flag[irq_cnt] = IRQF_SHARED; | 784 | chan_flag[irq_cnt] = IRQF_SHARED; |
| 1307 | } else { | 785 | } else { |
@@ -1312,7 +790,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
| 1312 | } else { | 790 | } else { |
| 1313 | do { | 791 | do { |
| 1314 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { | 792 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { |
| 1315 | if (irq_cnt >= SH_DMAC_MAX_CHANNELS) { | 793 | if (irq_cnt >= SH_DMAE_MAX_CHANNELS) { |
| 1316 | irq_cap = 1; | 794 | irq_cap = 1; |
| 1317 | break; | 795 | break; |
| 1318 | } | 796 | } |
@@ -1328,7 +806,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
| 1328 | chan_irq[irq_cnt++] = i; | 806 | chan_irq[irq_cnt++] = i; |
| 1329 | } | 807 | } |
| 1330 | 808 | ||
| 1331 | if (irq_cnt >= SH_DMAC_MAX_CHANNELS) | 809 | if (irq_cnt >= SH_DMAE_MAX_CHANNELS) |
| 1332 | break; | 810 | break; |
| 1333 | 811 | ||
| 1334 | chanirq_res = platform_get_resource(pdev, | 812 | chanirq_res = platform_get_resource(pdev, |
@@ -1346,14 +824,19 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
| 1346 | if (irq_cap) | 824 | if (irq_cap) |
| 1347 | dev_notice(&pdev->dev, "Attempting to register %d DMA " | 825 | dev_notice(&pdev->dev, "Attempting to register %d DMA " |
| 1348 | "channels when a maximum of %d are supported.\n", | 826 | "channels when a maximum of %d are supported.\n", |
| 1349 | pdata->channel_num, SH_DMAC_MAX_CHANNELS); | 827 | pdata->channel_num, SH_DMAE_MAX_CHANNELS); |
| 1350 | 828 | ||
| 1351 | pm_runtime_put(&pdev->dev); | 829 | pm_runtime_put(&pdev->dev); |
| 1352 | 830 | ||
| 1353 | dma_async_device_register(&shdev->common); | 831 | err = dma_async_device_register(&shdev->shdma_dev.dma_dev); |
| 832 | if (err < 0) | ||
| 833 | goto edmadevreg; | ||
| 1354 | 834 | ||
| 1355 | return err; | 835 | return err; |
| 1356 | 836 | ||
| 837 | edmadevreg: | ||
| 838 | pm_runtime_get(&pdev->dev); | ||
| 839 | |||
| 1357 | chan_probe_err: | 840 | chan_probe_err: |
| 1358 | sh_dmae_chan_remove(shdev); | 841 | sh_dmae_chan_remove(shdev); |
| 1359 | 842 | ||
@@ -1369,10 +852,11 @@ rst_err:
| 1369 | pm_runtime_put(&pdev->dev); | 852 | pm_runtime_put(&pdev->dev); |
| 1370 | pm_runtime_disable(&pdev->dev); | 853 | pm_runtime_disable(&pdev->dev); |
| 1371 | 854 | ||
| 855 | platform_set_drvdata(pdev, NULL); | ||
| 856 | shdma_cleanup(&shdev->shdma_dev); | ||
| 857 | eshdma: | ||
| 1372 | if (dmars) | 858 | if (dmars) |
| 1373 | iounmap(shdev->dmars); | 859 | iounmap(shdev->dmars); |
| 1374 | |||
| 1375 | platform_set_drvdata(pdev, NULL); | ||
| 1376 | emapdmars: | 860 | emapdmars: |
| 1377 | iounmap(shdev->chan_reg); | 861 | iounmap(shdev->chan_reg); |
| 1378 | synchronize_rcu(); | 862 | synchronize_rcu(); |
@@ -1387,13 +871,14 @@ ermrdmars:
| 1387 | return err; | 871 | return err; |
| 1388 | } | 872 | } |
| 1389 | 873 | ||
| 1390 | static int __exit sh_dmae_remove(struct platform_device *pdev) | 874 | static int __devexit sh_dmae_remove(struct platform_device *pdev) |
| 1391 | { | 875 | { |
| 1392 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 876 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
| 877 | struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; | ||
| 1393 | struct resource *res; | 878 | struct resource *res; |
| 1394 | int errirq = platform_get_irq(pdev, 0); | 879 | int errirq = platform_get_irq(pdev, 0); |
| 1395 | 880 | ||
| 1396 | dma_async_device_unregister(&shdev->common); | 881 | dma_async_device_unregister(dma_dev); |
| 1397 | 882 | ||
| 1398 | if (errirq > 0) | 883 | if (errirq > 0) |
| 1399 | free_irq(errirq, shdev); | 884 | free_irq(errirq, shdev); |
| @@ -1402,11 +887,11 @@ static int __exit sh_dmae_remove(struct platform_device *pdev) | |||
| 1402 | list_del_rcu(&shdev->node); | 887 | list_del_rcu(&shdev->node); |
| 1403 | spin_unlock_irq(&sh_dmae_lock); | 888 | spin_unlock_irq(&sh_dmae_lock); |
| 1404 | 889 | ||
| 1405 | /* channel data remove */ | ||
| 1406 | sh_dmae_chan_remove(shdev); | ||
| 1407 | |||
| 1408 | pm_runtime_disable(&pdev->dev); | 890 | pm_runtime_disable(&pdev->dev); |
| 1409 | 891 | ||
| 892 | sh_dmae_chan_remove(shdev); | ||
| 893 | shdma_cleanup(&shdev->shdma_dev); | ||
| 894 | |||
| 1410 | if (shdev->dmars) | 895 | if (shdev->dmars) |
| 1411 | iounmap(shdev->dmars); | 896 | iounmap(shdev->dmars); |
| 1412 | iounmap(shdev->chan_reg); | 897 | iounmap(shdev->chan_reg); |
| @@ -1426,77 +911,14 @@ static int __exit sh_dmae_remove(struct platform_device *pdev) | |||
| 1426 | return 0; | 911 | return 0; |
| 1427 | } | 912 | } |
| 1428 | 913 | ||
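Note the ordering in the rewritten teardown above: unregister from the dmaengine core first (so no new clients can grab a channel), then free the channels, and only then call `shdma_cleanup()` to release the base-library state, the mirror image of setup. A minimal sketch under those assumptions (`foo_*` names are hypothetical; `dma_async_device_unregister()` and `shdma_cleanup()` are the real calls used above):

```c
#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

static void foo_teardown(struct foo_device *fdev)
{
	/* 1. Stop handing out channels to new dmaengine clients. */
	dma_async_device_unregister(&fdev->shdma_dev.dma_dev);

	/* 2. Free per-channel resources (hypothetical helper). */
	foo_chan_remove(fdev);

	/* 3. Release shdma base-library bookkeeping last. */
	shdma_cleanup(&fdev->shdma_dev);
}
```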
| 1429 | static void sh_dmae_shutdown(struct platform_device *pdev) | ||
| 1430 | { | ||
| 1431 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
| 1432 | sh_dmae_ctl_stop(shdev); | ||
| 1433 | } | ||
| 1434 | |||
| 1435 | static int sh_dmae_runtime_suspend(struct device *dev) | ||
| 1436 | { | ||
| 1437 | return 0; | ||
| 1438 | } | ||
| 1439 | |||
| 1440 | static int sh_dmae_runtime_resume(struct device *dev) | ||
| 1441 | { | ||
| 1442 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
| 1443 | |||
| 1444 | return sh_dmae_rst(shdev); | ||
| 1445 | } | ||
| 1446 | |||
| 1447 | #ifdef CONFIG_PM | ||
| 1448 | static int sh_dmae_suspend(struct device *dev) | ||
| 1449 | { | ||
| 1450 | return 0; | ||
| 1451 | } | ||
| 1452 | |||
| 1453 | static int sh_dmae_resume(struct device *dev) | ||
| 1454 | { | ||
| 1455 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
| 1456 | int i, ret; | ||
| 1457 | |||
| 1458 | ret = sh_dmae_rst(shdev); | ||
| 1459 | if (ret < 0) | ||
| 1460 | dev_err(dev, "Failed to reset!\n"); | ||
| 1461 | |||
| 1462 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
| 1463 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
| 1464 | struct sh_dmae_slave *param = sh_chan->common.private; | ||
| 1465 | |||
| 1466 | if (!sh_chan->descs_allocated) | ||
| 1467 | continue; | ||
| 1468 | |||
| 1469 | if (param) { | ||
| 1470 | const struct sh_dmae_slave_config *cfg = param->config; | ||
| 1471 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
| 1472 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
| 1473 | } else { | ||
| 1474 | dmae_init(sh_chan); | ||
| 1475 | } | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | return 0; | ||
| 1479 | } | ||
| 1480 | #else | ||
| 1481 | #define sh_dmae_suspend NULL | ||
| 1482 | #define sh_dmae_resume NULL | ||
| 1483 | #endif | ||
| 1484 | |||
| 1485 | const struct dev_pm_ops sh_dmae_pm = { | ||
| 1486 | .suspend = sh_dmae_suspend, | ||
| 1487 | .resume = sh_dmae_resume, | ||
| 1488 | .runtime_suspend = sh_dmae_runtime_suspend, | ||
| 1489 | .runtime_resume = sh_dmae_runtime_resume, | ||
| 1490 | }; | ||
| 1491 | |||
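This hunk deletes the locally defined shutdown hook, the PM callbacks, and the `sh_dmae_pm` table from this spot, yet the driver structure below still wires up `.pm = &sh_dmae_pm` and `.shutdown = sh_dmae_shutdown`, so those definitions evidently survive elsewhere in the reworked file rather than disappearing. The deleted code also illustrates a runtime-PM idiom worth noting: the controller may lose register state while its power domain is off, so runtime resume re-runs the reset. A hedged sketch of that idiom (`foo_*` names are invented):

```c
#include <linux/pm.h>
#include <linux/device.h>

static int foo_runtime_resume(struct device *dev)
{
	struct foo_device *fdev = dev_get_drvdata(dev);

	/* Registers may be stale after the power domain was off:
	 * re-run the controller reset (hypothetical helper). */
	return foo_rst(fdev);
}

static const struct dev_pm_ops foo_pm = {
	.runtime_resume	= foo_runtime_resume,
};
```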
| 1492 | static struct platform_driver sh_dmae_driver = { | 914 | static struct platform_driver sh_dmae_driver = { |
| 1493 | .remove = __exit_p(sh_dmae_remove), | 915 | .driver = { |
| 1494 | .shutdown = sh_dmae_shutdown, | ||
| 1495 | .driver = { | ||
| 1496 | .owner = THIS_MODULE, | 916 | .owner = THIS_MODULE, |
| 1497 | .name = "sh-dma-engine", | ||
| 1498 | .pm = &sh_dmae_pm, | 917 | .pm = &sh_dmae_pm, |
| 918 | .name = SH_DMAE_DRV_NAME, | ||
| 1499 | }, | 919 | }, |
| 920 | .remove = __devexit_p(sh_dmae_remove), | ||
| 921 | .shutdown = sh_dmae_shutdown, | ||
| 1500 | }; | 922 | }; |
| 1501 | 923 | ||
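The remove callback changes from `__exit`/`__exit_p()` to `__devexit`/`__devexit_p()`, reflecting that device unbinding, not just module unload, can trigger removal. On kernels of this era, `__devexit_p(fn)` evaluates to `fn` normally but to NULL when device-removal support is compiled out, so the section-discarded function is never referenced. A small sketch of the pairing (`foo_*` names are illustrative):

```c
#include <linux/platform_device.h>

static int __devexit foo_remove(struct platform_device *pdev)
{
	/* release resources acquired in probe */
	return 0;
}

static struct platform_driver foo_driver = {
	.driver	= {
		.name	= "foo-engine",
		.owner	= THIS_MODULE,
	},
	.remove	= __devexit_p(foo_remove),
};
```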
| 1502 | static int __init sh_dmae_init(void) | 924 | static int __init sh_dmae_init(void) |
| @@ -1521,4 +943,4 @@ module_exit(sh_dmae_exit); | |||
| 1521 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); | 943 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); |
| 1522 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); | 944 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); |
| 1523 | MODULE_LICENSE("GPL"); | 945 | MODULE_LICENSE("GPL"); |
| 1524 | MODULE_ALIAS("platform:sh-dma-engine"); | 946 | MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME); |
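Defining the driver name once as SH_DMAE_DRV_NAME and pasting it into MODULE_ALIAS() keeps the modalias (`platform:sh-dma-engine`) in lockstep with `.driver.name`, which the platform bus uses for automatic module loading. Adjacent C string literals concatenate at compile time, as in this small sketch (`foo` names are illustrative):

```c
#include <linux/module.h>
#include <linux/platform_device.h>

#define FOO_DRV_NAME "foo-engine"

static struct platform_driver foo_driver = {
	.driver	= {
		.name	= FOO_DRV_NAME,	/* what the bus matches on */
	},
};

/* Expands to MODULE_ALIAS("platform:foo-engine"). */
MODULE_ALIAS("platform:" FOO_DRV_NAME);
```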
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h index 0b1d2c105f02..840e47d1c86c 100644 --- a/drivers/dma/sh/shdma.h +++ b/drivers/dma/sh/shdma.h | |||
| @@ -13,42 +13,27 @@ | |||
| 13 | #ifndef __DMA_SHDMA_H | 13 | #ifndef __DMA_SHDMA_H |
| 14 | #define __DMA_SHDMA_H | 14 | #define __DMA_SHDMA_H |
| 15 | 15 | ||
| 16 | #include <linux/shdma-base.h> | ||
| 16 | #include <linux/dmaengine.h> | 17 | #include <linux/dmaengine.h> |
| 17 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
| 18 | #include <linux/list.h> | 19 | #include <linux/list.h> |
| 19 | 20 | ||
| 20 | #define SH_DMAC_MAX_CHANNELS 20 | 21 | #define SH_DMAE_MAX_CHANNELS 20 |
| 21 | #define SH_DMA_SLAVE_NUMBER 256 | 22 | #define SH_DMAE_TCR_MAX 0x00FFFFFF /* 16MB */ |
| 22 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ | ||
| 23 | 23 | ||
| 24 | struct device; | 24 | struct device; |
| 25 | 25 | ||
| 26 | enum dmae_pm_state { | ||
| 27 | DMAE_PM_ESTABLISHED, | ||
| 28 | DMAE_PM_BUSY, | ||
| 29 | DMAE_PM_PENDING, | ||
| 30 | }; | ||
| 31 | |||
| 32 | struct sh_dmae_chan { | 26 | struct sh_dmae_chan { |
| 33 | spinlock_t desc_lock; /* Descriptor operation lock */ | 27 | struct shdma_chan shdma_chan; |
| 34 | struct list_head ld_queue; /* Link descriptors queue */ | ||
| 35 | struct list_head ld_free; /* Link descriptors free */ | ||
| 36 | struct dma_chan common; /* DMA common channel */ | ||
| 37 | struct device *dev; /* Channel device */ | ||
| 38 | struct tasklet_struct tasklet; /* Tasklet */ | ||
| 39 | int descs_allocated; /* desc count */ | ||
| 40 | int xmit_shift; /* log_2(bytes_per_xfer) */ | 28 | int xmit_shift; /* log_2(bytes_per_xfer) */ |
| 41 | int irq; | ||
| 42 | int id; /* Raw id of this channel */ | ||
| 43 | u32 __iomem *base; | 29 | u32 __iomem *base; |
| 44 | char dev_id[16]; /* unique name per DMAC of channel */ | 30 | char dev_id[16]; /* unique name per DMAC of channel */ |
| 45 | int pm_error; | 31 | int pm_error; |
| 46 | enum dmae_pm_state pm_state; | ||
| 47 | }; | 32 | }; |
| 48 | 33 | ||
| 49 | struct sh_dmae_device { | 34 | struct sh_dmae_device { |
| 50 | struct dma_device common; | 35 | struct shdma_dev shdma_dev; |
| 51 | struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; | 36 | struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS]; |
| 52 | struct sh_dmae_pdata *pdata; | 37 | struct sh_dmae_pdata *pdata; |
| 53 | struct list_head node; | 38 | struct list_head node; |
| 54 | u32 __iomem *chan_reg; | 39 | u32 __iomem *chan_reg; |
| @@ -57,10 +42,21 @@ struct sh_dmae_device { | |||
| 57 | u32 chcr_ie_bit; | 42 | u32 chcr_ie_bit; |
| 58 | }; | 43 | }; |
| 59 | 44 | ||
| 60 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) | 45 | struct sh_dmae_regs { |
| 46 | u32 sar; /* SAR / source address */ | ||
| 47 | u32 dar; /* DAR / destination address */ | ||
| 48 | u32 tcr; /* TCR / transfer count */ | ||
| 49 | }; | ||
| 50 | |||
| 51 | struct sh_dmae_desc { | ||
| 52 | struct sh_dmae_regs hw; | ||
| 53 | struct shdma_desc shdma_desc; | ||
| 54 | }; | ||
| 55 | |||
| 56 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan) | ||
| 61 | #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) | 57 | #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) |
| 62 | #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) | 58 | #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) |
| 63 | #define to_sh_dev(chan) container_of(chan->common.device,\ | 59 | #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ |
| 64 | struct sh_dmae_device, common) | 60 | struct sh_dmae_device, shdma_dev.dma_dev) |
| 65 | 61 | ||
| 66 | #endif /* __DMA_SHDMA_H */ | 62 | #endif /* __DMA_SHDMA_H */ |
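The header's central change is the embedding pattern: `struct sh_dmae_chan`, `struct sh_dmae_device`, and the new `struct sh_dmae_desc` each embed the corresponding shdma-base type, and the updated `to_sh_chan()`/`to_sh_dev()` macros use `container_of()` to climb from a base-library pointer back to the driver's wrapper. A reduced sketch of the idiom with invented `foo_*` names:

```c
#include <linux/kernel.h>	/* container_of() */

struct base_chan {		/* stand-in for struct shdma_chan */
	int id;
};

struct foo_chan {
	struct base_chan base;	/* embedded base-library channel */
	int xmit_shift;		/* driver-private state */
};

/* Recover our wrapper from the pointer the base library hands back. */
#define to_foo_chan(bchan) container_of(bchan, struct foo_chan, base)

static int foo_xmit_shift(struct base_chan *bchan)
{
	return to_foo_chan(bchan)->xmit_shift;
}
```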
