author		Linus Torvalds <torvalds@linux-foundation.org>	2016-04-16 18:52:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-04-16 18:52:38 -0400
commit		306a63bee192859ebd32c7328c7766636d882d8f (patch)
tree		62d5344b0d7d2df6f0a629a2297fff9d0e264e02
parent		ac82a57aff853599db757f666204ac8d2af4b26b (diff)
parent		956e6c8e18fa666ccc118c85fb32f92ebde3baf1 (diff)
Merge tag 'dmaengine-fix-4.6-rc4' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine fixes from Vinod Koul:
"This time we have some odd fixes in hsu, edma, omap and xilinx.
Usual fixes and nothing special"
* tag 'dmaengine-fix-4.6-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: dw: fix master selection
dmaengine: edma: special case slot limit workaround
dmaengine: edma: Remove dynamic TPTC power management feature
dmaengine: vdma: don't crash when bad channel is requested
dmaengine: omap-dma: Do not suppress interrupts for memcpy
dmaengine: omap-dma: Fix polled channel completion detection and handling
dmaengine: hsu: correct use of channel status register
dmaengine: hsu: correct residue calculation of active descriptor
dmaengine: hsu: set HSU_CH_MTSR to memory width
 drivers/dma/dw/core.c            | 34
 drivers/dma/edma.c               | 63
 drivers/dma/hsu/hsu.c            | 13
 drivers/dma/hsu/hsu.h            |  3
 drivers/dma/omap-dma.c           | 26
 drivers/dma/xilinx/xilinx_vdma.c |  2
 6 files changed, 73 insertions(+), 68 deletions(-)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5ad0ec1f0e29..97199b3c25a2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	struct dw_dma_slave *dws = dwc->chan.private;
 	u32 cfghi = DWC_CFGH_FIFO_MODE;
 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 
 	if (dwc->initialized == true)
 		return;
 
-	if (dws) {
-		/*
-		 * We need controller-specific data to set up slave
-		 * transfers.
-		 */
-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
-	} else {
-		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
-	}
+	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
 
 	channel_writel(dwc, CFG_LO, cfglo);
 	channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma_slave *dws = param;
 
-	if (!dws || dws->dma_dev != chan->device->dev)
+	if (dws->dma_dev != chan->device->dev)
 		return false;
 
 	/* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
+	/*
+	 * We need controller-specific data to set up slave transfers.
+	 */
+	if (chan->private && !dw_dma_filter(chan, chan->private)) {
+		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+		return -EINVAL;
+	}
+
 	/* Enable controller here if needed */
 	if (!dw->in_use)
 		dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&dwc->lock, flags);
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
+
+	/* Clear custom channel configuration */
+	dwc->src_id = 0;
+	dwc->dst_id = 0;
+
+	dwc->src_master = 0;
+	dwc->dst_master = 0;
+
 	dwc->initialized = false;
 
 	/* Disable interrupts */
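
For context on the fix above: dw_dma_filter() is the filter callback a client hands to dma_request_channel(), which is how the request-line IDs reach dwc->src_id/dwc->dst_id before dwc_initialize() programs CFG_HI. A minimal client sketch; the device pointer and ID values are illustrative, and the dw_dma_slave fields follow include/linux/platform_data/dma-dw.h as of this kernel:

	#include <linux/dmaengine.h>
	#include <linux/platform_data/dma-dw.h>

	static struct dma_chan *request_dw_chan(struct device *dma_dev)
	{
		/* Illustrative request lines; real values are board-specific */
		struct dw_dma_slave dws = {
			.dma_dev = dma_dev,	/* must equal chan->device->dev */
			.src_id = 0,
			.dst_id = 1,
		};
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/*
		 * dw_dma_filter() copies dws into the channel; the copy is
		 * undone in dwc_free_chan_resources() per the hunk above.
		 */
		return dma_request_channel(mask, dw_dma_filter, &dws);
	}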
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ee3463e774f8..04070baab78a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	struct edma_desc *edesc;
 	dma_addr_t src_addr, dst_addr;
 	enum dma_slave_buswidth dev_width;
+	bool use_intermediate = false;
 	u32 burst;
 	int i, ret, nslots;
 
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	 * but the synchronization is difficult to achieve with Cyclic and
 	 * cannot be guaranteed, so we error out early.
 	 */
-	if (nslots > MAX_NR_SG)
-		return NULL;
+	if (nslots > MAX_NR_SG) {
+		/*
+		 * If the burst and period sizes are the same, we can put
+		 * the full buffer into a single period and activate
+		 * intermediate interrupts. This will produce interrupts
+		 * after each burst, which is also after each desired period.
+		 */
+		if (burst == period_len) {
+			period_len = buf_len;
+			nslots = 2;
+			use_intermediate = true;
+		} else {
+			return NULL;
+		}
+	}
 
 	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
 			GFP_ATOMIC);
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		/*
 		 * Enable period interrupt only if it is requested
 		 */
-		if (tx_flags & DMA_PREP_INTERRUPT)
+		if (tx_flags & DMA_PREP_INTERRUPT) {
 			edesc->pset[i].param.opt |= TCINTEN;
+
+			/* Also enable intermediate interrupts if necessary */
+			if (use_intermediate)
+				edesc->pset[i].param.opt |= ITCINTEN;
+		}
 	}
 
 	/* Place the cyclic channel to highest priority queue */
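
A worked example of the slot arithmetic this workaround special-cases, with illustrative numbers (not from the commit) and assuming one PaRAM slot per period plus a link slot:

	/*
	 * buf_len = 65536, period_len = 64, burst = 64:
	 *   nslots = buf_len / period_len + 1 = 1025, far above MAX_NR_SG.
	 * Because burst == period_len, the buffer is folded into a single
	 * period (period_len = buf_len, nslots = 2) and ITCINTEN is set,
	 * so the hardware interrupts after every burst, which lands exactly
	 * on each original period boundary. The client therefore still
	 * gets one callback per requested period.
	 */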
@@ -1570,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
-	struct platform_device *tc_pdev;
-	int ret;
-
-	if (!IS_ENABLED(CONFIG_OF) || !tc)
-		return;
-
-	tc_pdev = of_find_device_by_node(tc->node);
-	if (!tc_pdev) {
-		pr_err("%s: TPTC device is not found\n", __func__);
-		return;
-	}
-	if (!pm_runtime_enabled(&tc_pdev->dev))
-		pm_runtime_enable(&tc_pdev->dev);
-
-	if (enable)
-		ret = pm_runtime_get_sync(&tc_pdev->dev);
-	else
-		ret = pm_runtime_put_sync(&tc_pdev->dev);
-
-	if (ret < 0)
-		pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
-			enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
 /* Alloc channel resources */
 static int edma_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -1632,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
 		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
 		echan->hw_triggered ? "HW" : "SW");
 
-	edma_tc_set_pm_state(echan->tc, true);
-
 	return 0;
 
 err_slot:
@@ -1670,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
 		echan->alloced = false;
 	}
 
-	edma_tc_set_pm_state(echan->tc, false);
 	echan->tc = NULL;
 	echan->hw_triggered = false;
 
@@ -2417,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
 	int i;
 
 	for (i = 0; i < ecc->num_channels; i++) {
-		if (echan[i].alloced) {
+		if (echan[i].alloced)
 			edma_setup_interrupt(&echan[i], false);
-			edma_tc_set_pm_state(echan[i].tc, false);
-		}
 	}
 
 	return 0;
@@ -2450,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
 
 			/* Set up channel -> slot mapping for the entry slot */
 			edma_set_chmap(&echan[i], echan[i].slot[0]);
-
-			edma_tc_set_pm_state(echan[i].tc, true);
 		}
 	}
 
@@ -2475,7 +2461,8 @@ static struct platform_driver edma_driver = {
 
 static int edma_tptc_probe(struct platform_device *pdev)
 {
-	return 0;
+	pm_runtime_enable(&pdev->dev);
+	return pm_runtime_get_sync(&pdev->dev);
 }
 
 static struct platform_driver edma_tptc_driver = {
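
The edma_tptc_probe() change replaces the removed edma_tc_set_pm_state() helper with a permanent runtime-PM reference: each TPTC is powered at probe and stays powered for the driver's lifetime. A sketch of that pattern in its usual error-checked form (the merged code simply returns pm_runtime_get_sync() directly; the function name here is hypothetical):

	static int example_tptc_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_enable(&pdev->dev);
		ret = pm_runtime_get_sync(&pdev->dev);
		if (ret < 0) {
			/* get_sync takes a reference even on failure */
			pm_runtime_put_noidle(&pdev->dev);
			pm_runtime_disable(&pdev->dev);
			return ret;
		}
		return 0;
	}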
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index eef145edb936..ee510515ce18 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
 
 	if (hsuc->direction == DMA_MEM_TO_DEV) {
 		bsr = config->dst_maxburst;
-		mtsr = config->dst_addr_width;
+		mtsr = config->src_addr_width;
 	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
 		bsr = config->src_maxburst;
-		mtsr = config->src_addr_width;
+		mtsr = config->dst_addr_width;
 	}
 
 	hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
-	return sr;
+	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
 }
 
 irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 {
 	struct hsu_dma_desc *desc = hsuc->desc;
-	size_t bytes = desc->length;
+	size_t bytes = 0;
 	int i;
 
-	i = desc->active % HSU_DMA_CHAN_NR_DESC;
+	for (i = desc->active; i < desc->nents; i++)
+		bytes += desc->sg[i].len;
+
+	i = HSU_DMA_CHAN_NR_DESC - 1;
 	do {
 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
 	} while (--i >= 0);
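
With the residue fix above, an in-progress descriptor reports the sum of the sg entries not yet handed to hardware plus the byte counts the hardware returns for the in-flight sub-descriptors (HSU_CH_DxTSR). A client reads this through the standard dmaengine API; a minimal sketch, with chan and cookie assumed set up elsewhere:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS)
		/* residue = pending sg bytes + remaining DxTSR counts */
		pr_debug("residue: %u bytes\n", state.residue);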
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 578a8ee8cd05..6b070c22b1df 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
 #define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
 #define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
 #define HSU_CH_SR_CHE		BIT(15)
+#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY	(BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY	(BIT(31) | BIT(30))
 
 /* Bits in HSU_CH_CR */
 #define HSU_CH_CR_CHA		BIT(0)
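
These definitions exist so hsu_dma_chan_get_sr() can strip status-only fields: reading the channel status register also reports descriptor-empty flags (bits 16..19) and a current-descriptor field (bits 30..31), which are not interrupt events and should not make the IRQ handler claim interrupts that never fired. A tiny decode helper, hypothetical and assuming the bit layout implied by the defines:

	/* Which of the four descriptors does the hardware flag as empty? */
	static inline unsigned int hsu_ch_sr_desce(u32 sr)
	{
		return (sr & HSU_CH_SR_DESCE_ANY) >> 16;
	}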
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 43bd5aee7ffe..1e984e18c126 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -48,6 +48,7 @@ struct omap_chan {
 	unsigned dma_sig;
 	bool cyclic;
 	bool paused;
+	bool running;
 
 	int dma_ch;
 	struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
 
 	/* Enable channel */
 	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+	c->running = true;
 }
 
 static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
 
 		omap_dma_chan_write(c, CLNK_CTRL, val);
 	}
+
+	c->running = false;
 }
 
 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
 	struct omap_chan *c = to_omap_dma_chan(chan);
 	struct virt_dma_desc *vd;
 	enum dma_status ret;
-	uint32_t ccr;
 	unsigned long flags;
 
-	ccr = omap_dma_chan_read(c, CCR);
-	/* The channel is no longer active, handle the completion right away */
-	if (!(ccr & CCR_ENABLE))
-		omap_dma_callback(c->dma_ch, 0, c);
-
 	ret = dma_cookie_status(chan, cookie, txstate);
+
+	if (!c->paused && c->running) {
+		uint32_t ccr = omap_dma_chan_read(c, CCR);
+		/*
+		 * The channel is no longer active, set the return value
+		 * accordingly
+		 */
+		if (!(ccr & CCR_ENABLE))
+			ret = DMA_COMPLETE;
+	}
+
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
 	d->ccr = c->ccr;
 	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
 
-	d->cicr = CICR_DROP_IE;
-	if (tx_flags & DMA_PREP_INTERRUPT)
-		d->cicr |= CICR_FRAME_IE;
+	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
 
 	d->csdp = data_type;
 
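
Together, the new running flag and the reworked omap_dma_tx_status() let a client poll a memcpy to completion instead of relying on an interrupt callback. A minimal polling client sketch using the standard dmaengine API; chan, dst, src and len are assumed set up elsewhere:

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* no DMA_PREP_INTERRUPT: we intend to poll */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/*
	 * With the fix, tx_status reads CCR_ENABLE back and reports
	 * DMA_COMPLETE once the channel stops, even without an IRQ.
	 */
	while (dmaengine_tx_status(chan, cookie, NULL) == DMA_IN_PROGRESS)
		cpu_relax();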
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 0ee0321868d3..ef67f278e076 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1236,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
 	struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
 	int chan_id = dma_spec->args[0];
 
-	if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+	if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
 		return NULL;
 
 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
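
Without the added check, a consumer whose device tree referenced a channel index that probe never populated would crash of_dma_xilinx_xlate() on a NULL xdev->chan[chan_id]; now the translation fails cleanly. A hypothetical consumer sketch ("vdma0" is an illustrative request name):

	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "vdma0");
	if (!chan)
		return -ENODEV;	/* a bad channel id now fails instead of oopsing */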