author		David S. Miller <davem@davemloft.net>	2015-10-20 09:08:27 -0400
committer	David S. Miller <davem@davemloft.net>	2015-10-20 09:08:27 -0400
commit		26440c835f8b1a491e2704118ac55bf87334366c (patch)
tree		3c2d23b59fd49b252fdbf6c09efc41b354933fc6 /drivers/dma
parent		371f1c7e0d854796adc622cc3bacfcc5fc638db1 (diff)
parent		1099f86044111e9a7807f09523e42d4c9d0fb781 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/usb/asix_common.c
net/ipv4/inet_connection_sock.c
net/switchdev/switchdev.c
In the inet_connection_sock.c case the request socket hashing scheme
is completely different in net-next.
The other two conflicts were overlapping changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/at_xdmac.c		15
-rw-r--r--	drivers/dma/dmaengine.c		10
-rw-r--r--	drivers/dma/dw/core.c		4
-rw-r--r--	drivers/dma/idma64.c		16
-rw-r--r--	drivers/dma/pxa_dma.c		31
-rw-r--r--	drivers/dma/sun4i-dma.c		6
-rw-r--r--	drivers/dma/xgene-dma.c		46
-rw-r--r--	drivers/dma/zx296702_dma.c	2
8 files changed, 77 insertions, 53 deletions
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a165b4bfd330..dd24375b76dd 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
 	return desc;
 }
 
+void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+	memset(&desc->lld, 0, sizeof(desc->lld));
+	INIT_LIST_HEAD(&desc->descs_list);
+	desc->direction = DMA_TRANS_NONE;
+	desc->xfer_size = 0;
+	desc->active_xfer = false;
+}
+
 /* Call must be protected by lock. */
 static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 {
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 		desc = list_first_entry(&atchan->free_descs_list,
 					struct at_xdmac_desc, desc_node);
 		list_del(&desc->desc_node);
-		desc->active_xfer = false;
+		at_xdmac_init_used_desc(desc);
 	}
 
 	return desc;
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
 
 	if (xt->src_inc) {
 		if (xt->src_sgl)
-			chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
+			chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
 		else
 			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
 	}
 
 	if (xt->dst_inc) {
 		if (xt->dst_sgl)
-			chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
+			chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
 		else
 			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
 	}
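A note on the at_xdmac change: descriptors are recycled through a free list, and the fix scrubs every field a previous transfer may have left behind rather than only active_xfer. A minimal userspace sketch of that recycle-and-reinit pattern (all names and types here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct desc {
	char lld[16];		/* stand-in for the hardware linked-list words */
	size_t xfer_size;
	bool active_xfer;
	struct desc *next;	/* free-list link */
};

static struct desc *free_list;

/* Reset every field a previous transfer may have dirtied, not just one. */
static void init_used_desc(struct desc *d)
{
	memset(d->lld, 0, sizeof(d->lld));
	d->xfer_size = 0;
	d->active_xfer = false;
}

static struct desc *get_desc(void)
{
	struct desc *d = free_list;

	if (d) {
		free_list = d->next;
		init_used_desc(d);	/* was: only d->active_xfer = false */
	}
	return d;
}

int main(void)
{
	static struct desc stale = { .xfer_size = 42, .active_xfer = true };

	free_list = &stale;
	struct desc *d = get_desc();
	printf("xfer_size=%zu active=%d\n", d->xfer_size, d->active_xfer);
	return 0;
}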
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ff284c8e3d5..09479d4be4db 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 	mutex_lock(&dma_list_mutex);
 
 	if (chan->client_count == 0) {
+		struct dma_device *device = chan->device;
+
+		dma_cap_set(DMA_PRIVATE, device->cap_mask);
+		device->privatecnt++;
 		err = dma_chan_get(chan);
-		if (err)
+		if (err) {
 			pr_debug("%s: failed to get %s: (%d)\n",
 				 __func__, dma_chan_name(chan), err);
+			chan = NULL;
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+		}
 	} else
 		chan = NULL;
 
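The dmaengine change pairs the DMA_PRIVATE capability flag with the privatecnt reference count, and on dma_chan_get() failure it undoes both, so a failed grab leaves no stale private marking on the device. A simplified sketch of that balanced take/undo pattern (stand-in types, not the dmaengine API):

#include <stdio.h>

struct dma_dev {
	int privatecnt;
	int private_flag;	/* stand-in for DMA_PRIVATE in cap_mask */
};

static int chan_get(void)
{
	return -1;		/* simulate dma_chan_get() failing */
}

static const char *get_slave_channel(struct dma_dev *dev)
{
	const char *chan = "chan0";

	dev->private_flag = 1;	/* dma_cap_set(DMA_PRIVATE, ...) */
	dev->privatecnt++;

	if (chan_get() != 0) {
		chan = NULL;
		/* undo exactly what was taken above */
		if (--dev->privatecnt == 0)
			dev->private_flag = 0;
	}
	return chan;
}

int main(void)
{
	struct dma_dev dev = { 0 };
	const char *chan = get_slave_channel(&dev);

	printf("chan=%s privatecnt=%d flag=%d\n",
	       chan ? chan : "(null)", dev.privatecnt, dev.private_flag);
	return 0;
}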
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cf1c87fa1edd..bedce038c6e2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	INIT_LIST_HEAD(&dw->dma.channels);
 	for (i = 0; i < nr_channels; i++) {
 		struct dw_dma_chan *dwc = &dw->chan[i];
-		int r = nr_channels - i - 1;
 
 		dwc->chan.device = &dw->dma;
 		dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
 		/* 7 is highest priority & 0 is lowest. */
 		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-			dwc->priority = r;
+			dwc->priority = nr_channels - i - 1;
 		else
 			dwc->priority = i;
 
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 		/* Hardware configuration */
 		if (autocfg) {
 			unsigned int dwc_params;
+			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
 			void __iomem *addr = chip->regs + r * sizeof(u32);
 
 			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
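The dw/core change is subtle: the per-channel DWC_PARAMS words sit at fixed offsets counted down from the top of the register block, so the index presumably has to be derived from the hardware maximum (DW_DMA_MAX_NR_CHANNELS, as the added line does), not from the possibly smaller probed channel count. A toy illustration of the offset arithmetic (values are made up):

#include <stdio.h>

#define MAX_NR_CHANNELS 8	/* stand-in for DW_DMA_MAX_NR_CHANNELS */

int main(void)
{
	unsigned int nr_channels = 3;	/* probed value; varies per platform */

	for (unsigned int i = 0; i < nr_channels; i++) {
		unsigned int r = MAX_NR_CHANNELS - i - 1;

		/* same word index for channel i no matter how many
		 * channels this particular platform instantiates */
		printf("chan %u: params word %u (offset 0x%x)\n", i, r, r * 4);
	}
	return 0;
}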
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 18c14e1f1414..48d6d9e94f67 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
 	struct idma64_desc *desc = idma64c->desc;
 	struct idma64_hw_desc *hw;
 	size_t bytes = desc->length;
-	u64 llp;
-	u32 ctlhi;
+	u64 llp = channel_readq(idma64c, LLP);
+	u32 ctlhi = channel_readl(idma64c, CTL_HI);
 	unsigned int i = 0;
 
-	llp = channel_readq(idma64c, LLP);
 	do {
 		hw = &desc->hw[i];
-	} while ((hw->llp != llp) && (++i < desc->ndesc));
+		if (hw->llp == llp)
+			break;
+		bytes -= hw->len;
+	} while (++i < desc->ndesc);
 
 	if (!i)
 		return bytes;
 
-	do {
-		bytes -= desc->hw[--i].len;
-	} while (i);
+	/* The current chunk is not fully transfered yet */
+	bytes += desc->hw[--i].len;
 
-	ctlhi = channel_readl(idma64c, CTL_HI);
 	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
 }
 
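The idma64 rework changes the residue walk: chunks scanned before the one the LLP register matches are counted as complete, the matched position is backed up by one because (per the driver's own comment) that chunk is not fully transferred yet, and the bytes already moved in the active block are subtracted last. A standalone sketch of the arithmetic on fake data (illustrative types, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct hw_chunk {
	uint64_t llp;	/* link word, as the hardware descriptor carries it */
	size_t len;
};

static size_t active_size(const struct hw_chunk *hw, unsigned int ndesc,
			  size_t total, uint64_t llp_reg, size_t block_done)
{
	size_t bytes = total;
	unsigned int i = 0;

	do {
		if (hw[i].llp == llp_reg)
			break;		/* chunk the LLP register matches */
		bytes -= hw[i].len;	/* earlier chunks are complete */
	} while (++i < ndesc);

	if (!i)
		return bytes;		/* still on the first chunk */

	/* the current chunk is not fully transferred yet: count it back in
	 * before subtracting the in-block progress */
	bytes += hw[--i].len;

	return bytes - block_done;
}

int main(void)
{
	/* three 100-byte chunks; LLP matches the second chunk's link word
	 * and 40 bytes have moved within the active block */
	struct hw_chunk hw[] = { { 0xA, 100 }, { 0xB, 100 }, { 0xC, 100 } };

	printf("residue=%zu\n", active_size(hw, 3, 300, 0xB, 40)); /* 260 */
	return 0;
}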
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 5cb61ce01036..fc4156afa070 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
 		return;
 
 	/* clear the channel mapping in DRCMR */
-	reg = pxad_drcmr(chan->drcmr);
-	writel_relaxed(0, chan->phy->base + reg);
+	if (chan->drcmr <= DRCMR_CHLNUM) {
+		reg = pxad_drcmr(chan->drcmr);
+		writel_relaxed(0, chan->phy->base + reg);
+	}
 
 	spin_lock_irqsave(&pdev->phy_lock, flags);
 	for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
 		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
 		phy, phy->idx, misaligned);
 
-	reg = pxad_drcmr(phy->vchan->drcmr);
-	writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+	if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+		reg = pxad_drcmr(phy->vchan->drcmr);
+		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+	}
 
 	dalgn = phy_readl_relaxed(phy, DALGN);
 	if (misaligned)
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
 	struct dma_async_tx_descriptor *tx;
 	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
 
+	INIT_LIST_HEAD(&vd->node);
 	tx = vchan_tx_prep(vc, vd, tx_flags);
 	tx->tx_submit = pxad_tx_submit;
 	dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
 		width = chan->cfg.src_addr_width;
 		dev_addr = chan->cfg.src_addr;
 		*dev_src = dev_addr;
-		*dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+		*dcmd |= PXA_DCMD_INCTRGADDR;
+		if (chan->drcmr <= DRCMR_CHLNUM)
+			*dcmd |= PXA_DCMD_FLOWSRC;
 	}
 	if (dir == DMA_MEM_TO_DEV) {
 		maxburst = chan->cfg.dst_maxburst;
 		width = chan->cfg.dst_addr_width;
 		dev_addr = chan->cfg.dst_addr;
 		*dev_dst = dev_addr;
-		*dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+		*dcmd |= PXA_DCMD_INCSRCADDR;
+		if (chan->drcmr <= DRCMR_CHLNUM)
+			*dcmd |= PXA_DCMD_FLOWTRG;
 	}
 	if (dir == DMA_MEM_TO_MEM)
 		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
 	else
 		curr = phy_readl_relaxed(chan->phy, DTADR);
 
+	/*
+	 * curr has to be actually read before checking descriptor
+	 * completion, so that a curr inside a status updater
+	 * descriptor implies the following test returns true, and
+	 * preventing reordering of curr load and the test.
+	 */
+	rmb();
+	if (is_desc_completed(vd))
+		goto out;
+
 	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
 		hw_desc = sw_desc->hw_desc[i];
 		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
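The rmb() added to pxad_residue() orders two reads: the hardware progress pointer must be sampled before the completion flag, so that if the descriptor completes in between, the completion test fires and the possibly stale pointer is never used. A userspace analogue of that shape, using a C11 acquire fence where the kernel uses rmb() (illustrative only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct xfer {
	_Atomic uint32_t curr;	/* hardware progress pointer (DSADR/DTADR) */
	_Atomic bool done;	/* descriptor completion status */
};

static uint32_t residue(struct xfer *x, uint32_t end)
{
	uint32_t curr = atomic_load_explicit(&x->curr, memory_order_relaxed);

	/* keep the curr load before the completion test: if the transfer
	 * finished in between, done reads true and the possibly stale
	 * curr value is never used */
	atomic_thread_fence(memory_order_acquire);

	if (atomic_load_explicit(&x->done, memory_order_relaxed))
		return 0;

	return end - curr;
}

int main(void)
{
	struct xfer x;

	atomic_init(&x.curr, 0x1040);
	atomic_init(&x.done, false);
	printf("residue=%u\n", (unsigned)residue(&x, 0x1100)); /* 192 left */
	return 0;
}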
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index a1a500d96ff2..1661d518224a 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
 static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
 {
 	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
-	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_promise *promise, *tmp;
 
 	/* Free all the demands and completed demands */
-	list_for_each_entry(promise, &contract->demands, list)
+	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
 		kfree(promise);
 
-	list_for_each_entry(promise, &contract->completed_demands, list)
+	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
 		kfree(promise);
 
 	kfree(contract);
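The sun4i-dma fix is the classic list-teardown use-after-free: list_for_each_entry() advances by dereferencing the node just visited, so kfree() in the body makes the next step read freed memory, while the _safe variant caches the successor first. The same idiom in plain C (a simplified list, not <linux/list.h>):

#include <stdlib.h>

struct promise {
	struct promise *next;
};

static void free_all(struct promise *head)
{
	struct promise *p = head, *tmp;

	/* BROKEN equivalent: for (p = head; p; p = p->next) free(p);
	 * reads p->next after p has already been freed */
	while (p) {
		tmp = p->next;	/* cache the successor first ("_safe") */
		free(p);
		p = tmp;
	}
}

int main(void)
{
	struct promise *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct promise *p = malloc(sizeof(*p));

		p->next = head;
		head = p;
	}
	free_all(head);
	return 0;
}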
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index b23e8d52d126..8d57b1b12e41 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -59,7 +59,6 @@
 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN		0xD070
 #define XGENE_DMA_RING_BLK_MEM_RDY		0xD074
 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL		0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v)		(((v) & 0x0001FFFE) >> 1)
 #define XGENE_DMA_RING_ID_GET(owner, num)	(((owner) << 6) | (num))
 #define XGENE_DMA_RING_DST_ID(v)		((1 << 10) | (v))
 #define XGENE_DMA_RING_CMD_OFFSET		0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
 	return flyby_type[src_cnt];
 }
 
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
-	u32 __iomem *cmd_base = ring->cmd_base;
-	u32 ring_state = ioread32(&cmd_base[1]);
-
-	return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
 				     dma_addr_t *paddr)
 {
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
 	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 }
 
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
-				   struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+				    struct xgene_dma_desc_sw *desc_sw)
 {
+	struct xgene_dma_ring *ring = &chan->tx_ring;
 	struct xgene_dma_desc_hw *desc_hw;
 
-	/* Check if can push more descriptor to hw for execution */
-	if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
-		return -EBUSY;
-
 	/* Get hw descriptor from DMA tx ring */
 	desc_hw = &ring->desc_hw[ring->head];
 
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 		memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
 	}
 
+	/* Increment the pending transaction count */
+	chan->pending += ((desc_sw->flags &
+			XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
 	/* Notify the hw that we have descriptor ready for execution */
 	iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
 		  2 : 1, ring->cmd);
-
-	return 0;
 }
 
 /**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 {
 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
-	int ret;
 
 	/*
 	 * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 		if (chan->pending >= chan->max_outstanding)
 			return;
 
-		ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
-		if (ret)
-			return;
+		xgene_chan_xfer_request(chan, desc_sw);
 
 		/*
 		 * Delete this element from ld pending queue and append it to
 		 * ld running queue
 		 */
		list_move_tail(&desc_sw->node, &chan->ld_running);
-
-		/* Increment the pending transaction count */
-		chan->pending++;
 	}
 }
 
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 		 * Decrement the pending transaction count
 		 * as we have processed one
 		 */
-		chan->pending--;
+		chan->pending -= ((desc_sw->flags &
+				XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
 
 		/*
 		 * Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
 				     struct xgene_dma_ring *ring,
 				     enum xgene_dma_ring_cfgsize cfgsize)
 {
+	int ret;
+
 	/* Setup DMA ring descriptor variables */
 	ring->pdma = chan->pdma;
 	ring->cfgsize = cfgsize;
 	ring->num = chan->pdma->ring_num++;
 	ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
 
-	ring->size = xgene_dma_get_ring_size(chan, cfgsize);
-	if (ring->size <= 0)
-		return ring->size;
+	ret = xgene_dma_get_ring_size(chan, cfgsize);
+	if (ret <= 0)
+		return ret;
+	ring->size = ret;
 
 	/* Allocate memory for DMA ring descriptor */
 	ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
 		tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
 
 	/* Set the max outstanding request possible to this channel */
-	chan->max_outstanding = rx_ring->slots;
+	chan->max_outstanding = tx_ring->slots;
 
 	return ret;
 }
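Taken together, the xgene-dma hunks make chan->pending count ring slots rather than transactions: a 64B descriptor occupies two slots, so submission and completion must adjust the counter by the same 1-or-2 amount, and the back-pressure check compares against the tx ring (the ring actually being filled), not the rx ring. A toy model of that accounting (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TX_SLOTS 8

static unsigned int pending;

static unsigned int slots_for(bool is_64b_desc)
{
	return is_64b_desc ? 2 : 1;	/* 64B descriptors take two slots */
}

static bool submit(bool is_64b_desc)
{
	if (pending >= TX_SLOTS)	/* bound by the tx ring's slots */
		return false;
	pending += slots_for(is_64b_desc);
	return true;
}

static void complete(bool is_64b_desc)
{
	pending -= slots_for(is_64b_desc);	/* symmetric decrement */
}

int main(void)
{
	submit(true);
	submit(false);
	complete(true);
	complete(false);
	printf("pending=%u\n", pending);	/* balanced: 0 */
	return 0;
}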
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 39915a6b7986..c017fcd8e07c 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
 	struct dma_chan *chan;
 	struct zx_dma_chan *c;
 
-	if (request > d->dma_requests)
+	if (request >= d->dma_requests)
 		return NULL;
 
 	chan = dma_get_any_slave_channel(&d->slave);