Diffstat (limited to 'drivers/dma/shdma.c')
 drivers/dma/shdma.c | 411 ++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 297 insertions(+), 114 deletions(-)

diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index d10cc899c460..b75ce8b84c46 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -48,23 +48,20 @@ enum sh_dmae_desc_status {
  */
 #define RS_DEFAULT	(RS_DUAL)
 
+/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+
 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
 
 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
-	ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
+	ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
 {
-	return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
-}
-
-static void dmae_init(struct sh_dmae_chan *sh_chan)
-{
-	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
-	sh_dmae_writel(sh_chan, chcr, CHCR);
+	return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
 }
 
 /*
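The new sh_dmae_slave_used bitmask gives every slave ID a global exclusive-use flag: it is claimed in sh_dmae_alloc_chan_resources() and released in sh_dmae_free_chan_resources() further down. A minimal sketch of that reservation pattern; claim_slave()/release_slave() are illustrative names, not functions from the patch:

/* Sketch only: how a bitmap guards exclusive use of a slave ID. */
static unsigned long used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];

static int claim_slave(int slave_id)
{
	if (test_and_set_bit(slave_id, used))
		return -EBUSY;		/* someone already owns this ID */
	return 0;
}

static void release_slave(int slave_id)
{
	clear_bit(slave_id, used);	/* pairs with claim_slave() */
}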
@@ -95,27 +92,30 @@ static int sh_dmae_rst(int id)
 	return 0;
 }
 
-static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
 {
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
-	if (chcr & CHCR_DE) {
-		if (!(chcr & CHCR_TE))
-			return -EBUSY; /* working */
-	}
-	return 0; /* waiting */
+
+	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+		return true; /* working */
+
+	return false; /* waiting */
 }
 
-static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
+static unsigned int ts_shift[] = TS_SHIFT;
+static inline unsigned int calc_xmit_shift(u32 chcr)
 {
-	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
-	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
+	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
+		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
+
+	return ts_shift[cnt];
 }
 
 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
 {
 	sh_dmae_writel(sh_chan, hw->sar, SAR);
 	sh_dmae_writel(sh_chan, hw->dar, DAR);
-	sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
+	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
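calc_xmit_shift() no longer reads CHCR back from hardware: the shift is derived from the CHCR value about to be written, by splicing the low and high halves of the transfer-size field into one ts_shift[] index, and the result is cached in sh_chan->xmit_shift for TCR programming. A sketch of how the cached value is used, assuming this CHCR's TS field selects 32-byte transfers (shift 5); example_program_tcr() is illustrative, not patch code:

static void example_program_tcr(struct sh_dmae_chan *sh_chan, u32 chcr,
				size_t bytes)
{
	sh_chan->xmit_shift = calc_xmit_shift(chcr);	/* assumed 5 */
	/* bytes = 4096, shift = 5 -> TCR = 128 transfer units */
	sh_dmae_writel(sh_chan, bytes >> sh_chan->xmit_shift, TCR);
}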
@@ -123,7 +123,7 @@ static void dmae_start(struct sh_dmae_chan *sh_chan)
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 
 	chcr |= CHCR_DE | CHCR_IE;
-	sh_dmae_writel(sh_chan, chcr, CHCR);
+	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
 }
 
 static void dmae_halt(struct sh_dmae_chan *sh_chan)
@@ -134,55 +134,50 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
 	sh_dmae_writel(sh_chan, chcr, CHCR);
 }
 
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
+	sh_chan->xmit_shift = calc_xmit_shift(chcr);
+	sh_dmae_writel(sh_chan, chcr, CHCR);
+}
+
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	int ret = dmae_is_busy(sh_chan);
 	/* When DMA was working, can not set data to CHCR */
-	if (ret)
-		return ret;
+	if (dmae_is_busy(sh_chan))
+		return -EBUSY;
 
+	sh_chan->xmit_shift = calc_xmit_shift(val);
 	sh_dmae_writel(sh_chan, val, CHCR);
+
 	return 0;
 }
 
-#define DMARS1_ADDR	0x04
-#define DMARS2_ADDR	0x08
-#define DMARS_SHIFT	8
-#define DMARS_CHAN_MSK	0x01
+#define DMARS_SHIFT	8
+#define DMARS_CHAN_MSK	0x01
 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
 	u32 addr;
 	int shift = 0;
-	int ret = dmae_is_busy(sh_chan);
-	if (ret)
-		return ret;
+
+	if (dmae_is_busy(sh_chan))
+		return -EBUSY;
 
 	if (sh_chan->id & DMARS_CHAN_MSK)
 		shift = DMARS_SHIFT;
 
-	switch (sh_chan->id) {
-	/* DMARS0 */
-	case 0:
-	case 1:
-		addr = SH_DMARS_BASE;
-		break;
-	/* DMARS1 */
-	case 2:
-	case 3:
-		addr = (SH_DMARS_BASE + DMARS1_ADDR);
-		break;
-	/* DMARS2 */
-	case 4:
-	case 5:
-		addr = (SH_DMARS_BASE + DMARS2_ADDR);
-		break;
-	default:
+	if (sh_chan->id < 6)
+		/* DMA0RS0 - DMA0RS2 */
+		addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
+#ifdef SH_DMARS_BASE1
+	else if (sh_chan->id < 12)
+		/* DMA1RS0 - DMA1RS2 */
+		addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
+#endif
+	else
 		return -EINVAL;
-	}
 
-	ctrl_outw((val << shift) |
-		(ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)),
-		addr);
+	ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);
 
 	return 0;
 }
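Two channels share each 16-bit DMARS register: even IDs use the low byte, odd IDs the high byte, so the write must preserve the sibling channel's half. A standalone sketch of that read-modify-write; dmars_write() is an illustrative helper, not patch code:

static void dmars_write(u32 addr, int chan_id, u16 mid_rid)
{
	int shift = (chan_id & DMARS_CHAN_MSK) ? DMARS_SHIFT : 0;

	/* keep the sibling channel's byte, replace our own */
	ctrl_outw((mid_rid << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)),
		  addr);
}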
@@ -250,10 +245,53 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
 	return NULL;
 }
 
+static struct sh_dmae_slave_config *sh_dmae_find_slave(
+	struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+{
+	struct dma_device *dma_dev = sh_chan->common.device;
+	struct sh_dmae_device *shdev = container_of(dma_dev,
+					struct sh_dmae_device, common);
+	struct sh_dmae_pdata *pdata = &shdev->pdata;
+	int i;
+
+	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+		return NULL;
+
+	for (i = 0; i < pdata->config_num; i++)
+		if (pdata->config[i].slave_id == slave_id)
+			return pdata->config + i;
+
+	return NULL;
+}
+
 static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	struct sh_desc *desc;
+	struct sh_dmae_slave *param = chan->private;
+
+	/*
+	 * This relies on the guarantee from dmaengine that alloc_chan_resources
+	 * never runs concurrently with itself or free_chan_resources.
+	 */
+	if (param) {
+		struct sh_dmae_slave_config *cfg;
+
+		cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+		if (!cfg)
+			return -EINVAL;
+
+		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
+			return -EBUSY;
+
+		param->config = cfg;
+
+		dmae_set_dmars(sh_chan, cfg->mid_rid);
+		dmae_set_chcr(sh_chan, cfg->chcr);
+	} else {
+		if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400)
+			dmae_set_chcr(sh_chan, RS_DEFAULT);
+	}
 
 	spin_lock_bh(&sh_chan->desc_lock);
 	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
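alloc_chan_resources() receives the slave request through chan->private, which a client plants from its dma_request_channel() filter before the channel is handed out. A client-side sketch of that handshake, using the standard dmaengine filter pattern; SHDMA_SLAVE_SIUA_TX stands in for any real slave ID and example_request() is an illustrative name:

static bool sh_dmae_filter(struct dma_chan *chan, void *arg)
{
	chan->private = arg;	/* read back in sh_dmae_alloc_chan_resources() */
	return true;
}

static struct dma_chan *example_request(struct sh_dmae_slave *param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	param->slave_id = SHDMA_SLAVE_SIUA_TX;	/* hypothetical slave ID */
	return dma_request_channel(mask, sh_dmae_filter, param);
}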
@@ -286,10 +324,18 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
 
+	dmae_halt(sh_chan);
+
 	/* Prepared and not submitted descriptors can still be on the queue */
 	if (!list_empty(&sh_chan->ld_queue))
 		sh_dmae_chan_ld_cleanup(sh_chan, true);
 
+	if (chan->private) {
+		/* The caller is holding dma_list_mutex */
+		struct sh_dmae_slave *param = chan->private;
+		clear_bit(param->slave_id, sh_dmae_slave_used);
+	}
+
 	spin_lock_bh(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
@@ -301,23 +347,97 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	kfree(desc);
 }
 
-static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
-	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
-	size_t len, unsigned long flags)
+/**
+ * sh_dmae_add_desc - get, set up and return one transfer descriptor
+ * @sh_chan:	DMA channel
+ * @flags:	DMA transfer flags
+ * @dest:	destination DMA address, incremented when direction equals
+ *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ * @src:	source DMA address, incremented when direction equals
+ *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ * @len:	DMA transfer length
+ * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
+ * @direction:	needed for slave DMA to decide which address to keep constant,
+ *		equals DMA_BIDIRECTIONAL for MEMCPY
+ * Returns the new descriptor or NULL on error
+ * Locks: called with desc_lock held
+ */
+static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
+	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
+	struct sh_desc **first, enum dma_data_direction direction)
 {
-	struct sh_dmae_chan *sh_chan;
-	struct sh_desc *first = NULL, *prev = NULL, *new;
+	struct sh_desc *new;
 	size_t copy_size;
-	LIST_HEAD(tx_list);
-	int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
 
-	if (!chan)
+	if (!*len)
 		return NULL;
 
-	if (!len)
+	/* Allocate the link descriptor from the free list */
+	new = sh_dmae_get_desc(sh_chan);
+	if (!new) {
+		dev_err(sh_chan->dev, "No free link descriptor available\n");
 		return NULL;
+	}
 
-	sh_chan = to_sh_chan(chan);
+	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
+
+	new->hw.sar = *src;
+	new->hw.dar = *dest;
+	new->hw.tcr = copy_size;
+
+	if (!*first) {
+		/* First desc */
+		new->async_tx.cookie = -EBUSY;
+		*first = new;
+	} else {
+		/* Other desc - invisible to the user */
+		new->async_tx.cookie = -EINVAL;
+	}
+
+	dev_dbg(sh_chan->dev,
+		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
+		copy_size, *len, *src, *dest, &new->async_tx,
+		new->async_tx.cookie, sh_chan->xmit_shift);
+
+	new->mark = DESC_PREPARED;
+	new->async_tx.flags = flags;
+	new->direction = direction;
+
+	*len -= copy_size;
+	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+		*src += copy_size;
+	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+		*dest += copy_size;
+
+	return new;
+}
+
+/*
+ * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and correct
+ * list manipulation. For slave DMA, direction carries the usual meaning and,
+ * logically, the SG list is RAM while addr holds the slave address, e.g., the
+ * FIFO I/O register. For MEMCPY, direction equals DMA_BIDIRECTIONAL and the
+ * SG list contains only one element, pointing at the source buffer.
+ */
+static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
+	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct scatterlist *sg;
+	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
+	LIST_HEAD(tx_list);
+	int chunks = 0;
+	int i;
+
+	if (!sg_len)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i)
+		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
+			(SH_DMA_TCR_MAX + 1);
 
 	/* Have to lock the whole loop to protect against concurrent release */
 	spin_lock_bh(&sh_chan->desc_lock);
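Worth pausing on the chunk arithmetic above: each descriptor moves at most SH_DMA_TCR_MAX + 1 bytes, so the per-element descriptor count is a ceiling division. A worked example, assuming SH_DMA_TCR_MAX = 0x00FFFFFF (the actual value is platform-defined in the shdma headers):

/*
 * chunks per element = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1)
 *
 *	len = 16 MiB		-> (16M + 16M - 1) / 16M = 1 descriptor
 *	len = 16 MiB + 1	-> 2 descriptors
 *	len = 40 MiB		-> 3 descriptors
 *
 * Each sh_dmae_add_desc() call consumes min(len, SH_DMA_TCR_MAX + 1) bytes,
 * so "chunks" counts exactly the descriptors the inner do/while will create.
 */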
@@ -333,49 +453,32 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	 * only during this function, then they are immediately spliced
 	 * back onto the free list in form of a chain
 	 */
-	do {
-		/* Allocate the link descriptor from the free list */
-		new = sh_dmae_get_desc(sh_chan);
-		if (!new) {
-			dev_err(sh_chan->dev,
-				"No free memory for link descriptor\n");
-			list_for_each_entry(new, &tx_list, node)
-				new->mark = DESC_IDLE;
-			list_splice(&tx_list, &sh_chan->ld_free);
-			spin_unlock_bh(&sh_chan->desc_lock);
-			return NULL;
-		}
-
-		copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
-
-		new->hw.sar = dma_src;
-		new->hw.dar = dma_dest;
-		new->hw.tcr = copy_size;
-		if (!first) {
-			/* First desc */
-			new->async_tx.cookie = -EBUSY;
-			first = new;
-		} else {
-			/* Other desc - invisible to the user */
-			new->async_tx.cookie = -EINVAL;
-		}
-
-		dev_dbg(sh_chan->dev,
-			"chaining %u of %u with %p, dst %x, cookie %d\n",
-			copy_size, len, &new->async_tx, dma_dest,
-			new->async_tx.cookie);
-
-		new->mark = DESC_PREPARED;
-		new->async_tx.flags = flags;
-		new->chunks = chunks--;
-
-		prev = new;
-		len -= copy_size;
-		dma_src += copy_size;
-		dma_dest += copy_size;
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &tx_list);
-	} while (len);
+	for_each_sg(sgl, sg, sg_len, i) {
+		dma_addr_t sg_addr = sg_dma_address(sg);
+		size_t len = sg_dma_len(sg);
+
+		if (!len)
+			goto err_get_desc;
+
+		do {
+			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
+				i, sg, len, (unsigned long long)sg_addr);
+
+			if (direction == DMA_FROM_DEVICE)
+				new = sh_dmae_add_desc(sh_chan, flags,
+						&sg_addr, addr, &len, &first,
+						direction);
+			else
+				new = sh_dmae_add_desc(sh_chan, flags,
+						addr, &sg_addr, &len, &first,
+						direction);
+			if (!new)
+				goto err_get_desc;
+
+			new->chunks = chunks--;
+			list_add_tail(&new->node, &tx_list);
+		} while (len);
+	}
 
 	if (new != first)
 		new->async_tx.cookie = -ENOSPC;
@@ -386,6 +489,77 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	return &first->async_tx;
+
+err_get_desc:
+	list_for_each_entry(new, &tx_list, node)
+		new->mark = DESC_IDLE;
+	list_splice(&tx_list, &sh_chan->ld_free);
+
+	spin_unlock_bh(&sh_chan->desc_lock);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
+	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+	size_t len, unsigned long flags)
+{
+	struct sh_dmae_chan *sh_chan;
+	struct scatterlist sg;
+
+	if (!chan || !len)
+		return NULL;
+
+	chan->private = NULL;
+
+	sh_chan = to_sh_chan(chan);
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
+		    offset_in_page(dma_src));
+	sg_dma_address(&sg) = dma_src;
+	sg_dma_len(&sg) = len;
+
+	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+			       flags);
+}
+
+static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct sh_dmae_slave *param;
+	struct sh_dmae_chan *sh_chan;
+
+	if (!chan)
+		return NULL;
+
+	sh_chan = to_sh_chan(chan);
+	param = chan->private;
+
+	/* Someone calling slave DMA on a public channel? */
+	if (!param || !sg_len) {
+		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
+			 __func__, param, sg_len, param ? param->slave_id : -1);
+		return NULL;
+	}
+
+	/*
+	 * if (param != NULL), this is a successfully requested slave channel,
+	 * therefore param->config != NULL too.
+	 */
+	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+			       direction, flags);
+}
+
+static void sh_dmae_terminate_all(struct dma_chan *chan)
+{
+	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+
+	if (!chan)
+		return;
+
+	sh_dmae_chan_ld_cleanup(sh_chan, true);
 }
 
 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
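With sh_dmae_prep_sg() in place, sh_dmae_prep_memcpy() becomes a thin wrapper: it builds a one-element scatterlist around the source and passes DMA_BIDIRECTIONAL, so both addresses advance chunk by chunk. A client-side sketch of driving that memcpy path through the standard dmaengine calls; example_memcpy() is an illustrative name, not part of the patch:

static dma_cookie_t example_memcpy(struct dma_chan *chan, dma_addr_t dst,
				   dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_CTRL_ACK);
	if (!tx)
		return -EBUSY;

	cookie = tx->tx_submit(tx);		/* lands on ld_queue */
	chan->device->device_issue_pending(chan);
	return cookie;
}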
@@ -419,7 +593,11 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 		cookie = tx->cookie;
 
 		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
-			BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+			if (sh_chan->completed_cookie != desc->cookie - 1)
+				dev_dbg(sh_chan->dev,
+					"Completing cookie %d, expected %d\n",
+					desc->cookie,
+					sh_chan->completed_cookie + 1);
 			sh_chan->completed_cookie = desc->cookie;
 		}
 
@@ -492,7 +670,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 		return;
 	}
 
-	/* Find the first un-transfer desciptor */
+	/* Find the first not transferred descriptor */
 	list_for_each_entry(sd, &sh_chan->ld_queue, node)
 		if (sd->mark == DESC_SUBMITTED) {
 			/* Get the ld start address from ld_queue */
@@ -559,7 +737,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 
 	/* IRQ Multi */
 	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
-		int cnt = 0;
+		int __maybe_unused cnt = 0;
 		switch (irq) {
 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
 		case DMTE6_IRQ:
@@ -596,11 +774,14 @@ static void dmae_do_tasklet(unsigned long data)
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
 	struct sh_desc *desc;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
 
 	spin_lock(&sh_chan->desc_lock);
 	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
-		if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
-		    desc->mark == DESC_SUBMITTED) {
+		if (desc->mark == DESC_SUBMITTED &&
+		    ((desc->direction == DMA_FROM_DEVICE &&
+		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
+		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
 			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
 				desc->async_tx.cookie, &desc->async_tx,
 				desc->hw.dar);
@@ -673,7 +854,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 	}
 
 	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
-			"sh-dmae%d", new_sh_chan->id);
+		 "sh-dmae%d", new_sh_chan->id);
 
 	/* set up channel irq */
 	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
@@ -684,11 +865,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 		goto err_no_irq;
 	}
 
-	/* CHCR register control function */
-	new_sh_chan->set_chcr = dmae_set_chcr;
-	/* DMARS register control function */
-	new_sh_chan->set_dmars = dmae_set_dmars;
-
 	shdev->chan[id] = new_sh_chan;
 	return 0;
 
@@ -759,12 +935,19 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&shdev->common.channels);
 
 	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
+	dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
+
 	shdev->common.device_alloc_chan_resources
 		= sh_dmae_alloc_chan_resources;
 	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
 	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
 	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
 	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
+
+	/* Compulsory for DMA_SLAVE fields */
+	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
+	shdev->common.device_terminate_all = sh_dmae_terminate_all;
+
 	shdev->common.dev = &pdev->dev;
 	/* Default transfer size of 32 bytes requires 32-byte alignment */
 	shdev->common.copy_align = 5;
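Registering DMA_SLAVE in cap_mask together with device_prep_slave_sg and device_terminate_all is what makes the controller visible to dma_request_channel() slave clients. A closing sketch of a slave submission on a channel claimed as in the earlier filter example; my_done() is a hypothetical completion callback and the scatterlist is assumed to be already mapped with dma_map_sg():

static void my_done(void *arg)
{
	/* hypothetical completion hook */
}

static void example_slave_submit(struct dma_chan *chan,
				 struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						DMA_TO_DEVICE,
						DMA_PREP_INTERRUPT);
	if (!tx)
		return;

	tx->callback = my_done;
	tx->callback_param = NULL;
	tx->tx_submit(tx);
	chan->device->device_issue_pending(chan);
}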
