diff options
author | Linus Walleij <linus.walleij@stericsson.com> | 2010-06-20 17:26:07 -0400 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2010-06-22 21:01:55 -0400 |
commit | 508849ade23c1167bfbdf557259398adfe7044b9 (patch) | |
tree | e494544350342ea83a8c1a7b3fd8d4c4056e3057 /drivers/dma | |
parent | 1d392a7ba43300b0bde877de15121b261d7a6ce2 (diff) |
DMAENGINE: ste_dma40: allocate LCLA dynamically
Switch to allocating LCLA in memory instead of having a fixed
address.
Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/ste_dma40.c | 253 | ||||
-rw-r--r-- | drivers/dma/ste_dma40_ll.c | 5 | ||||
-rw-r--r-- | drivers/dma/ste_dma40_ll.h | 12 |
3 files changed, 168 insertions, 102 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 8c46bb803dbb..5748e96f00de 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -30,6 +30,12 @@ | |||
30 | /* Maximum iterations taken before giving up suspending a channel */ | 30 | /* Maximum iterations taken before giving up suspending a channel */ |
31 | #define D40_SUSPEND_MAX_IT 500 | 31 | #define D40_SUSPEND_MAX_IT 500 |
32 | 32 | ||
33 | /* Hardware requirement on LCLA alignment */ | ||
34 | #define LCLA_ALIGNMENT 0x40000 | ||
35 | /* Attempts before giving up to trying to get pages that are aligned */ | ||
36 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 | ||
37 | |||
38 | /* Bit markings for allocation map */ | ||
33 | #define D40_ALLOC_FREE (1 << 31) | 39 | #define D40_ALLOC_FREE (1 << 31) |
34 | #define D40_ALLOC_PHY (1 << 30) | 40 | #define D40_ALLOC_PHY (1 << 30) |
35 | #define D40_ALLOC_LOG_FREE 0 | 41 | #define D40_ALLOC_LOG_FREE 0 |
@@ -64,9 +70,9 @@ enum d40_command { | |||
64 | */ | 70 | */ |
65 | struct d40_lli_pool { | 71 | struct d40_lli_pool { |
66 | void *base; | 72 | void *base; |
67 | int size; | 73 | int size; |
68 | /* Space for dst and src, plus an extra for padding */ | 74 | /* Space for dst and src, plus an extra for padding */ |
69 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; | 75 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; |
70 | }; | 76 | }; |
71 | 77 | ||
72 | /** | 78 | /** |
@@ -111,18 +117,20 @@ struct d40_desc { | |||
111 | /** | 117 | /** |
112 | * struct d40_lcla_pool - LCLA pool settings and data. | 118 | * struct d40_lcla_pool - LCLA pool settings and data. |
113 | * | 119 | * |
114 | * @base: The virtual address of LCLA. | 120 | * @base: The virtual address of LCLA. 18 bit aligned. |
 115 | * @phy: Physical base address of LCLA. | 121 | * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
116 | * @base_size: size of lcla. | 122 | * This pointer is only there for clean-up on error. |
123 | * @pages: The number of pages needed for all physical channels. | ||
124 | * Only used later for clean-up on error | ||
117 | * @lock: Lock to protect the content in this struct. | 125 | * @lock: Lock to protect the content in this struct. |
118 | * @alloc_map: Mapping between physical channel and LCLA entries. | 126 | * @alloc_map: Bitmap mapping between physical channel and LCLA entries. |
119 | * @num_blocks: The number of entries of alloc_map. Equals to the | 127 | * @num_blocks: The number of entries of alloc_map. Equals to the |
120 | * number of physical channels. | 128 | * number of physical channels. |
121 | */ | 129 | */ |
122 | struct d40_lcla_pool { | 130 | struct d40_lcla_pool { |
123 | void *base; | 131 | void *base; |
124 | dma_addr_t phy; | 132 | void *base_unaligned; |
125 | resource_size_t base_size; | 133 | int pages; |
126 | spinlock_t lock; | 134 | spinlock_t lock; |
127 | u32 *alloc_map; | 135 | u32 *alloc_map; |
128 | int num_blocks; | 136 | int num_blocks; |
@@ -432,13 +440,12 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | |||
432 | 440 | ||
433 | /* Support functions for logical channels */ | 441 | /* Support functions for logical channels */ |
434 | 442 | ||
435 | static int d40_lcla_id_get(struct d40_chan *d40c, | 443 | static int d40_lcla_id_get(struct d40_chan *d40c) |
436 | struct d40_lcla_pool *pool) | ||
437 | { | 444 | { |
438 | int src_id = 0; | 445 | int src_id = 0; |
439 | int dst_id = 0; | 446 | int dst_id = 0; |
440 | struct d40_log_lli *lcla_lidx_base = | 447 | struct d40_log_lli *lcla_lidx_base = |
441 | pool->base + d40c->phy_chan->num * 1024; | 448 | d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024; |
442 | int i; | 449 | int i; |
443 | int lli_per_log = d40c->base->plat_data->llis_per_log; | 450 | int lli_per_log = d40c->base->plat_data->llis_per_log; |
444 | unsigned long flags; | 451 | unsigned long flags; |
@@ -446,24 +453,28 @@ static int d40_lcla_id_get(struct d40_chan *d40c, | |||
446 | if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) | 453 | if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) |
447 | return 0; | 454 | return 0; |
448 | 455 | ||
449 | if (pool->num_blocks > 32) | 456 | if (d40c->base->lcla_pool.num_blocks > 32) |
450 | return -EINVAL; | 457 | return -EINVAL; |
451 | 458 | ||
452 | spin_lock_irqsave(&pool->lock, flags); | 459 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
453 | 460 | ||
454 | for (i = 0; i < pool->num_blocks; i++) { | 461 | for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) { |
455 | if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { | 462 | if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & |
456 | pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); | 463 | (0x1 << i))) { |
464 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= | ||
465 | (0x1 << i); | ||
457 | break; | 466 | break; |
458 | } | 467 | } |
459 | } | 468 | } |
460 | src_id = i; | 469 | src_id = i; |
461 | if (src_id >= pool->num_blocks) | 470 | if (src_id >= d40c->base->lcla_pool.num_blocks) |
462 | goto err; | 471 | goto err; |
463 | 472 | ||
464 | for (; i < pool->num_blocks; i++) { | 473 | for (; i < d40c->base->lcla_pool.num_blocks; i++) { |
465 | if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { | 474 | if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & |
466 | pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); | 475 | (0x1 << i))) { |
476 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= | ||
477 | (0x1 << i); | ||
467 | break; | 478 | break; |
468 | } | 479 | } |
469 | } | 480 | } |
@@ -477,29 +488,13 @@ static int d40_lcla_id_get(struct d40_chan *d40c, | |||
477 | d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; | 488 | d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; |
478 | d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; | 489 | d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; |
479 | 490 | ||
480 | 491 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | |
481 | spin_unlock_irqrestore(&pool->lock, flags); | ||
482 | return 0; | 492 | return 0; |
483 | err: | 493 | err: |
484 | spin_unlock_irqrestore(&pool->lock, flags); | 494 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); |
485 | return -EINVAL; | 495 | return -EINVAL; |
486 | } | 496 | } |
487 | 497 | ||
488 | static void d40_lcla_id_put(struct d40_chan *d40c, | ||
489 | struct d40_lcla_pool *pool, | ||
490 | int id) | ||
491 | { | ||
492 | unsigned long flags; | ||
493 | if (id < 0) | ||
494 | return; | ||
495 | |||
496 | d40c->lcla.src_id = -1; | ||
497 | d40c->lcla.dst_id = -1; | ||
498 | |||
499 | spin_lock_irqsave(&pool->lock, flags); | ||
500 | pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id)); | ||
501 | spin_unlock_irqrestore(&pool->lock, flags); | ||
502 | } | ||
503 | 498 | ||
504 | static int d40_channel_execute_command(struct d40_chan *d40c, | 499 | static int d40_channel_execute_command(struct d40_chan *d40c, |
505 | enum d40_command command) | 500 | enum d40_command command) |
@@ -567,6 +562,7 @@ done: | |||
567 | static void d40_term_all(struct d40_chan *d40c) | 562 | static void d40_term_all(struct d40_chan *d40c) |
568 | { | 563 | { |
569 | struct d40_desc *d40d; | 564 | struct d40_desc *d40d; |
565 | unsigned long flags; | ||
570 | 566 | ||
571 | /* Release active descriptors */ | 567 | /* Release active descriptors */ |
572 | while ((d40d = d40_first_active_get(d40c))) { | 568 | while ((d40d = d40_first_active_get(d40c))) { |
@@ -584,10 +580,17 @@ static void d40_term_all(struct d40_chan *d40c) | |||
584 | d40_desc_free(d40c, d40d); | 580 | d40_desc_free(d40c, d40d); |
585 | } | 581 | } |
586 | 582 | ||
587 | d40_lcla_id_put(d40c, &d40c->base->lcla_pool, | 583 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
588 | d40c->lcla.src_id); | 584 | |
589 | d40_lcla_id_put(d40c, &d40c->base->lcla_pool, | 585 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= |
590 | d40c->lcla.dst_id); | 586 | (~(0x1 << d40c->lcla.dst_id)); |
587 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= | ||
588 | (~(0x1 << d40c->lcla.src_id)); | ||
589 | |||
590 | d40c->lcla.src_id = -1; | ||
591 | d40c->lcla.dst_id = -1; | ||
592 | |||
593 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | ||
591 | 594 | ||
592 | d40c->pending_tx = 0; | 595 | d40c->pending_tx = 0; |
593 | d40c->busy = false; | 596 | d40c->busy = false; |
@@ -703,7 +706,6 @@ static int d40_config_write(struct d40_chan *d40c) | |||
703 | 706 | ||
704 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | 707 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) |
705 | { | 708 | { |
706 | |||
707 | if (d40d->lli_phy.dst && d40d->lli_phy.src) { | 709 | if (d40d->lli_phy.dst && d40d->lli_phy.src) { |
708 | d40_phy_lli_write(d40c->base->virtbase, | 710 | d40_phy_lli_write(d40c->base->virtbase, |
709 | d40c->phy_chan->num, | 711 | d40c->phy_chan->num, |
@@ -712,13 +714,24 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | |||
712 | } else if (d40d->lli_log.dst && d40d->lli_log.src) { | 714 | } else if (d40d->lli_log.dst && d40d->lli_log.src) { |
713 | struct d40_log_lli *src = d40d->lli_log.src; | 715 | struct d40_log_lli *src = d40d->lli_log.src; |
714 | struct d40_log_lli *dst = d40d->lli_log.dst; | 716 | struct d40_log_lli *dst = d40d->lli_log.dst; |
717 | int s; | ||
715 | 718 | ||
716 | src += d40d->lli_count; | 719 | src += d40d->lli_count; |
717 | dst += d40d->lli_count; | 720 | dst += d40d->lli_count; |
718 | d40_log_lli_write(d40c->lcpa, d40c->lcla.src, | 721 | s = d40_log_lli_write(d40c->lcpa, |
719 | d40c->lcla.dst, | 722 | d40c->lcla.src, d40c->lcla.dst, |
720 | dst, src, | 723 | dst, src, |
721 | d40c->base->plat_data->llis_per_log); | 724 | d40c->base->plat_data->llis_per_log); |
725 | |||
726 | /* If s equals to zero, the job is not linked */ | ||
727 | if (s > 0) { | ||
728 | (void) dma_map_single(d40c->base->dev, d40c->lcla.src, | ||
729 | s * sizeof(struct d40_log_lli), | ||
730 | DMA_TO_DEVICE); | ||
731 | (void) dma_map_single(d40c->base->dev, d40c->lcla.dst, | ||
732 | s * sizeof(struct d40_log_lli), | ||
733 | DMA_TO_DEVICE); | ||
734 | } | ||
722 | } | 735 | } |
723 | d40d->lli_count += d40d->lli_tx_len; | 736 | d40d->lli_count += d40d->lli_tx_len; |
724 | } | 737 | } |
@@ -930,7 +943,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
930 | if (!il[row].is_error) | 943 | if (!il[row].is_error) |
931 | dma_tc_handle(d40c); | 944 | dma_tc_handle(d40c); |
932 | else | 945 | else |
933 | dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n", | 946 | dev_err(base->dev, |
947 | "[%s] IRQ chan: %ld offset %d idx %d\n", | ||
934 | __func__, chan, il[row].offset, idx); | 948 | __func__, chan, il[row].offset, idx); |
935 | 949 | ||
936 | spin_unlock(&d40c->lock); | 950 | spin_unlock(&d40c->lock); |
@@ -1089,7 +1103,8 @@ static int d40_allocate_channel(struct d40_chan *d40c) | |||
1089 | int j; | 1103 | int j; |
1090 | int log_num; | 1104 | int log_num; |
1091 | bool is_src; | 1105 | bool is_src; |
1092 | bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) | 1106 | bool is_log = (d40c->dma_cfg.channel_type & |
1107 | STEDMA40_CHANNEL_IN_OPER_MODE) | ||
1093 | == STEDMA40_CHANNEL_IN_LOG_MODE; | 1108 | == STEDMA40_CHANNEL_IN_LOG_MODE; |
1094 | 1109 | ||
1095 | 1110 | ||
@@ -1124,8 +1139,10 @@ static int d40_allocate_channel(struct d40_chan *d40c) | |||
1124 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 1139 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
1125 | int phy_num = j + event_group * 2; | 1140 | int phy_num = j + event_group * 2; |
1126 | for (i = phy_num; i < phy_num + 2; i++) { | 1141 | for (i = phy_num; i < phy_num + 2; i++) { |
1127 | if (d40_alloc_mask_set(&phys[i], is_src, | 1142 | if (d40_alloc_mask_set(&phys[i], |
1128 | 0, is_log)) | 1143 | is_src, |
1144 | 0, | ||
1145 | is_log)) | ||
1129 | goto found_phy; | 1146 | goto found_phy; |
1130 | } | 1147 | } |
1131 | } | 1148 | } |
@@ -1396,13 +1413,14 @@ static u32 d40_residue(struct d40_chan *d40c) | |||
1396 | u32 num_elt; | 1413 | u32 num_elt; |
1397 | 1414 | ||
1398 | if (d40c->log_num != D40_PHY_CHAN) | 1415 | if (d40c->log_num != D40_PHY_CHAN) |
1399 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | 1416 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) |
1400 | >> D40_MEM_LCSP2_ECNT_POS; | 1417 | >> D40_MEM_LCSP2_ECNT_POS; |
1401 | else | 1418 | else |
1402 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + | 1419 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + |
1403 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 1420 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
1404 | D40_CHAN_REG_SDELT) & | 1421 | D40_CHAN_REG_SDELT) & |
1405 | D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS; | 1422 | D40_SREG_ELEM_PHY_ECNT_MASK) >> |
1423 | D40_SREG_ELEM_PHY_ECNT_POS; | ||
1406 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | 1424 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); |
1407 | } | 1425 | } |
1408 | 1426 | ||
@@ -1455,8 +1473,10 @@ int stedma40_set_psize(struct dma_chan *chan, | |||
1455 | if (d40c->log_num != D40_PHY_CHAN) { | 1473 | if (d40c->log_num != D40_PHY_CHAN) { |
1456 | d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | 1474 | d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; |
1457 | d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | 1475 | d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; |
1458 | d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; | 1476 | d40c->log_def.lcsp1 |= src_psize << |
1459 | d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; | 1477 | D40_MEM_LCSP1_SCFG_PSIZE_POS; |
1478 | d40c->log_def.lcsp3 |= dst_psize << | ||
1479 | D40_MEM_LCSP1_SCFG_PSIZE_POS; | ||
1460 | goto out; | 1480 | goto out; |
1461 | } | 1481 | } |
1462 | 1482 | ||
@@ -1521,8 +1541,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
1521 | * split list into 1-length and run only in lcpa | 1541 | * split list into 1-length and run only in lcpa |
1522 | * space. | 1542 | * space. |
1523 | */ | 1543 | */ |
1524 | if (d40_lcla_id_get(d40c, | 1544 | if (d40_lcla_id_get(d40c) != 0) |
1525 | &d40c->base->lcla_pool) != 0) | ||
1526 | d40d->lli_tx_len = 1; | 1545 | d40d->lli_tx_len = 1; |
1527 | 1546 | ||
1528 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { | 1547 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { |
@@ -1849,7 +1868,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, | |||
1849 | * If not, split list into 1-length and run only | 1868 | * If not, split list into 1-length and run only |
1850 | * in lcpa space. | 1869 | * in lcpa space. |
1851 | */ | 1870 | */ |
1852 | if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) | 1871 | if (d40_lcla_id_get(d40c) != 0) |
1853 | d40d->lli_tx_len = 1; | 1872 | d40d->lli_tx_len = 1; |
1854 | 1873 | ||
1855 | if (direction == DMA_FROM_DEVICE) | 1874 | if (direction == DMA_FROM_DEVICE) |
@@ -2476,6 +2495,78 @@ static void __init d40_hw_init(struct d40_base *base) | |||
2476 | 2495 | ||
2477 | } | 2496 | } |
2478 | 2497 | ||
2498 | static int __init d40_lcla_allocate(struct d40_base *base) | ||
2499 | { | ||
2500 | unsigned long *page_list; | ||
2501 | int i, j; | ||
2502 | int ret = 0; | ||
2503 | |||
2504 | /* | ||
2505 | * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned, | ||
 2506 | * To fulfill this hardware requirement without wasting 256 kb | ||
2507 | * we allocate pages until we get an aligned one. | ||
2508 | */ | ||
2509 | page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, | ||
2510 | GFP_KERNEL); | ||
2511 | |||
2512 | if (!page_list) { | ||
2513 | ret = -ENOMEM; | ||
2514 | goto failure; | ||
2515 | } | ||
2516 | |||
2517 | /* Calculating how many pages that are required */ | ||
2518 | base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; | ||
2519 | |||
2520 | for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { | ||
2521 | page_list[i] = __get_free_pages(GFP_KERNEL, | ||
2522 | base->lcla_pool.pages); | ||
2523 | if (!page_list[i]) { | ||
2524 | |||
2525 | dev_err(base->dev, | ||
2526 | "[%s] Failed to allocate %d pages.\n", | ||
2527 | __func__, base->lcla_pool.pages); | ||
2528 | |||
2529 | for (j = 0; j < i; j++) | ||
2530 | free_pages(page_list[j], base->lcla_pool.pages); | ||
2531 | goto failure; | ||
2532 | } | ||
2533 | |||
2534 | if ((virt_to_phys((void *)page_list[i]) & | ||
2535 | (LCLA_ALIGNMENT - 1)) == 0) | ||
2536 | break; | ||
2537 | } | ||
2538 | |||
2539 | for (j = 0; j < i; j++) | ||
2540 | free_pages(page_list[j], base->lcla_pool.pages); | ||
2541 | |||
2542 | if (i < MAX_LCLA_ALLOC_ATTEMPTS) { | ||
2543 | base->lcla_pool.base = (void *)page_list[i]; | ||
2544 | } else { | ||
 2545 | /* After many attempts, no success with finding the correct | ||
2546 | * alignment try with allocating a big buffer */ | ||
2547 | dev_warn(base->dev, | ||
2548 | "[%s] Failed to get %d pages @ 18 bit align.\n", | ||
2549 | __func__, base->lcla_pool.pages); | ||
2550 | base->lcla_pool.base_unaligned = kmalloc(SZ_1K * | ||
2551 | base->num_phy_chans + | ||
2552 | LCLA_ALIGNMENT, | ||
2553 | GFP_KERNEL); | ||
2554 | if (!base->lcla_pool.base_unaligned) { | ||
2555 | ret = -ENOMEM; | ||
2556 | goto failure; | ||
2557 | } | ||
2558 | |||
2559 | base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, | ||
2560 | LCLA_ALIGNMENT); | ||
2561 | } | ||
2562 | |||
2563 | writel(virt_to_phys(base->lcla_pool.base), | ||
2564 | base->virtbase + D40_DREG_LCLA); | ||
2565 | failure: | ||
2566 | kfree(page_list); | ||
2567 | return ret; | ||
2568 | } | ||
2569 | |||
2479 | static int __init d40_probe(struct platform_device *pdev) | 2570 | static int __init d40_probe(struct platform_device *pdev) |
2480 | { | 2571 | { |
2481 | int err; | 2572 | int err; |
@@ -2535,41 +2626,11 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2535 | __func__); | 2626 | __func__); |
2536 | goto failure; | 2627 | goto failure; |
2537 | } | 2628 | } |
2538 | /* Get IO for logical channel link address */ | ||
2539 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla"); | ||
2540 | if (!res) { | ||
2541 | ret = -ENOENT; | ||
2542 | dev_err(&pdev->dev, | ||
2543 | "[%s] No \"lcla\" resource defined\n", | ||
2544 | __func__); | ||
2545 | goto failure; | ||
2546 | } | ||
2547 | 2629 | ||
2548 | base->lcla_pool.base_size = resource_size(res); | 2630 | ret = d40_lcla_allocate(base); |
2549 | base->lcla_pool.phy = res->start; | 2631 | if (ret) { |
2550 | 2632 | dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", | |
2551 | if (request_mem_region(res->start, resource_size(res), | 2633 | __func__); |
2552 | D40_NAME " I/O lcla") == NULL) { | ||
2553 | ret = -EBUSY; | ||
2554 | dev_err(&pdev->dev, | ||
2555 | "[%s] Failed to request LCLA region 0x%x-0x%x\n", | ||
2556 | __func__, res->start, res->end); | ||
2557 | goto failure; | ||
2558 | } | ||
2559 | val = readl(base->virtbase + D40_DREG_LCLA); | ||
2560 | if (res->start != val && val != 0) { | ||
2561 | dev_warn(&pdev->dev, | ||
2562 | "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n", | ||
2563 | __func__, val, res->start); | ||
2564 | } else | ||
2565 | writel(res->start, base->virtbase + D40_DREG_LCLA); | ||
2566 | |||
2567 | base->lcla_pool.base = ioremap(res->start, resource_size(res)); | ||
2568 | if (!base->lcla_pool.base) { | ||
2569 | ret = -ENOMEM; | ||
2570 | dev_err(&pdev->dev, | ||
2571 | "[%s] Failed to ioremap LCLA 0x%x-0x%x\n", | ||
2572 | __func__, res->start, res->end); | ||
2573 | goto failure; | 2634 | goto failure; |
2574 | } | 2635 | } |
2575 | 2636 | ||
@@ -2601,9 +2662,11 @@ failure: | |||
2601 | kmem_cache_destroy(base->desc_slab); | 2662 | kmem_cache_destroy(base->desc_slab); |
2602 | if (base->virtbase) | 2663 | if (base->virtbase) |
2603 | iounmap(base->virtbase); | 2664 | iounmap(base->virtbase); |
2604 | if (base->lcla_pool.phy) | 2665 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) |
2605 | release_mem_region(base->lcla_pool.phy, | 2666 | free_pages((unsigned long)base->lcla_pool.base, |
2606 | base->lcla_pool.base_size); | 2667 | base->lcla_pool.pages); |
2668 | if (base->lcla_pool.base_unaligned) | ||
2669 | kfree(base->lcla_pool.base_unaligned); | ||
2607 | if (base->phy_lcpa) | 2670 | if (base->phy_lcpa) |
2608 | release_mem_region(base->phy_lcpa, | 2671 | release_mem_region(base->phy_lcpa, |
2609 | base->lcpa_size); | 2672 | base->lcpa_size); |
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 772636be13eb..d937f76d6e2e 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c | |||
@@ -420,7 +420,7 @@ int d40_log_sg_to_lli(int lcla_id, | |||
420 | return total_size; | 420 | return total_size; |
421 | } | 421 | } |
422 | 422 | ||
423 | void d40_log_lli_write(struct d40_log_lli_full *lcpa, | 423 | int d40_log_lli_write(struct d40_log_lli_full *lcpa, |
424 | struct d40_log_lli *lcla_src, | 424 | struct d40_log_lli *lcla_src, |
425 | struct d40_log_lli *lcla_dst, | 425 | struct d40_log_lli *lcla_dst, |
426 | struct d40_log_lli *lli_dst, | 426 | struct d40_log_lli *lli_dst, |
@@ -448,4 +448,7 @@ void d40_log_lli_write(struct d40_log_lli_full *lcpa, | |||
448 | slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK; | 448 | slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK; |
449 | dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK; | 449 | dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK; |
450 | } | 450 | } |
451 | |||
452 | return i; | ||
453 | |||
451 | } | 454 | } |
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index c081f28ec1e3..9c0fa2f5fe57 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -339,12 +339,12 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, | |||
339 | bool term_int, dma_addr_t dev_addr, int max_len, | 339 | bool term_int, dma_addr_t dev_addr, int max_len, |
340 | int llis_per_log); | 340 | int llis_per_log); |
341 | 341 | ||
342 | void d40_log_lli_write(struct d40_log_lli_full *lcpa, | 342 | int d40_log_lli_write(struct d40_log_lli_full *lcpa, |
343 | struct d40_log_lli *lcla_src, | 343 | struct d40_log_lli *lcla_src, |
344 | struct d40_log_lli *lcla_dst, | 344 | struct d40_log_lli *lcla_dst, |
345 | struct d40_log_lli *lli_dst, | 345 | struct d40_log_lli *lli_dst, |
346 | struct d40_log_lli *lli_src, | 346 | struct d40_log_lli *lli_src, |
347 | int llis_per_log); | 347 | int llis_per_log); |
348 | 348 | ||
349 | int d40_log_sg_to_lli(int lcla_id, | 349 | int d40_log_sg_to_lli(int lcla_id, |
350 | struct scatterlist *sg, | 350 | struct scatterlist *sg, |