author     Russell King <rmk+kernel@arm.linux.org.uk>  2012-05-26 09:27:40 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2012-07-01 09:16:00 -0400
commit     879f127bb2d0b604cf49f7682c0431d47f42f8f9 (patch)
tree       2c93c02299899cf17de53bf5d6157cfc18d5f9d5 /drivers/dma/amba-pl08x.c
parent     083be28a1056eaaebdf116126b9d859348160f45 (diff)
dmaengine: PL08x: convert to use vchan submitted/issued lists
Convert to use the virtual dma channel submitted/issued descriptor
lists rather than our own private lists, and use the virtual dma
channel support functions to manage these lists.
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
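
For readers unfamiliar with the virt-dma helpers, here is a minimal sketch of the pattern this commit adopts. The example_* structures and functions are illustrative stand-ins rather than the real pl08x code; the vchan_* helpers and virt_dma_* types are the ones from drivers/dma/virt-dma.h that appear in the diff below.

/*
 * Sketch only: a driver embeds virt_dma_chan/virt_dma_desc and lets the
 * vchan_* helpers manage the submitted and issued descriptor lists.
 */
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include "virt-dma.h"

struct example_desc {
	struct virt_dma_desc vd;	/* embeds the dma_async_tx_descriptor */
	/* driver-specific data (LLIs, sg list, ...) */
};

struct example_chan {
	struct virt_dma_chan vc;	/* owns vc.lock, desc_submitted, desc_issued */
};

/* prep_*: hand the descriptor to virt-dma instead of open-coding tx_submit() */
static struct dma_async_tx_descriptor *
example_prep(struct example_chan *c, struct example_desc *d, unsigned long flags)
{
	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

/* issue_pending: move submitted descriptors onto the issued list */
static void example_issue_pending(struct example_chan *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		/* kick the hardware if it is idle */
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/* start the next issued descriptor, as pl08x_start_next_txd() now does */
static void example_start_next(struct example_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!vd)
		return;
	list_del(&vd->node);
	/* program the controller from container_of(vd, struct example_desc, vd) */
}

The net effect, visible in the diffstat, is that the driver drops its private pend_list/issued_list and the open-coded tx_submit() path in favour of these helpers.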
Diffstat (limited to 'drivers/dma/amba-pl08x.c')
-rw-r--r--  drivers/dma/amba-pl08x.c  64
1 file changed, 17 insertions, 47 deletions
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 398a5da6f439..5333a91518ed 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -215,8 +215,6 @@ enum pl08x_dma_chan_state {
  * @name: name of channel
  * @cd: channel platform data
  * @runtime_addr: address for RX/TX according to the runtime config
- * @pend_list: queued transactions pending on this channel
- * @issued_list: issued transactions for this channel
  * @done_list: list of completed transactions
  * @at: active transaction on this channel
  * @lock: a lock for this channel data
@@ -233,8 +231,6 @@ struct pl08x_dma_chan {
 	const char *name;
 	const struct pl08x_channel_data *cd;
 	struct dma_slave_config cfg;
-	struct list_head pend_list;
-	struct list_head issued_list;
 	struct list_head done_list;
 	struct pl08x_txd *at;
 	struct pl08x_driver_data *host;
@@ -357,12 +353,12 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
+	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
+	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
 	struct pl08x_lli *lli;
-	struct pl08x_txd *txd;
 	u32 val;

-	txd = list_first_entry(&plchan->issued_list, struct pl08x_txd, node);
-	list_del(&txd->node);
+	list_del(&txd->vd.node);

 	plchan->at = txd;

@@ -524,18 +520,18 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 	}

 	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->issued_list)) {
+	if (!list_empty(&plchan->vc.desc_issued)) {
 		struct pl08x_txd *txdi;
-		list_for_each_entry(txdi, &plchan->issued_list, node) {
+		list_for_each_entry(txdi, &plchan->vc.desc_issued, vd.node) {
 			struct pl08x_sg *dsg;
 			list_for_each_entry(dsg, &txd->dsg_list, node)
 				bytes += dsg->len;
 		}
 	}

-	if (!list_empty(&plchan->pend_list)) {
+	if (!list_empty(&plchan->vc.desc_submitted)) {
 		struct pl08x_txd *txdi;
-		list_for_each_entry(txdi, &plchan->pend_list, node) {
+		list_for_each_entry(txdi, &plchan->vc.desc_submitted, vd.node) {
 			struct pl08x_sg *dsg;
 			list_for_each_entry(dsg, &txd->dsg_list, node)
 				bytes += dsg->len;
@@ -1094,13 +1090,12 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 	LIST_HEAD(head);
 	struct pl08x_txd *txd;

-	list_splice_tail_init(&plchan->issued_list, &head);
-	list_splice_tail_init(&plchan->pend_list, &head);
+	vchan_get_all_descriptors(&plchan->vc, &head);

 	while (!list_empty(&head)) {
-		txd = list_first_entry(&head, struct pl08x_txd, node);
+		txd = list_first_entry(&head, struct pl08x_txd, vd.node);
 		pl08x_release_mux(plchan);
-		list_del(&txd->node);
+		list_del(&txd->vd.node);
 		pl08x_free_txd(pl08x, txd);
 	}
 }
@@ -1117,23 +1112,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
 {
 }

-static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
-	struct pl08x_txd *txd = to_pl08x_txd(tx);
-	unsigned long flags;
-	dma_cookie_t cookie;
-
-	spin_lock_irqsave(&plchan->vc.lock, flags);
-	cookie = dma_cookie_assign(tx);
-
-	/* Put this onto the pending list */
-	list_add_tail(&txd->node, &plchan->pend_list);
-	spin_unlock_irqrestore(&plchan->vc.lock, flags);
-
-	return cookie;
-}
-
 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
 	struct dma_chan *chan, unsigned long flags)
 {
@@ -1318,8 +1296,7 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 	unsigned long flags;

 	spin_lock_irqsave(&plchan->vc.lock, flags);
-	list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
-	if (!list_empty(&plchan->issued_list)) {
+	if (vchan_issue_pending(&plchan->vc)) {
 		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
 			pl08x_phy_alloc_and_start(plchan);
 	}
@@ -1345,16 +1322,11 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	return 0;
 }

-static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
-		unsigned long flags)
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

 	if (txd) {
-		dma_async_tx_descriptor_init(&txd->vd.tx, &plchan->vc.chan);
-		txd->vd.tx.flags = flags;
-		txd->vd.tx.tx_submit = pl08x_tx_submit;
-		INIT_LIST_HEAD(&txd->node);
 		INIT_LIST_HEAD(&txd->dsg_list);

 		/* Always enable error and terminal interrupts */
@@ -1377,7 +1349,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	struct pl08x_sg *dsg;
 	int ret;

-	txd = pl08x_get_txd(plchan, flags);
+	txd = pl08x_get_txd(plchan);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev,
 			"%s no memory for descriptor\n", __func__);
@@ -1413,7 +1385,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 	if (ret)
 		return NULL;

-	return &txd->vd.tx;
+	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }

 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1435,7 +1407,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
 		__func__, sg_dma_len(sgl), plchan->name);

-	txd = pl08x_get_txd(plchan, flags);
+	txd = pl08x_get_txd(plchan);
 	if (!txd) {
 		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
 		return NULL;
@@ -1529,7 +1501,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	if (ret)
 		return NULL;

-	return &txd->vd.tx;
+	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }

 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -1739,7 +1711,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			 * And start the next descriptor (if any),
 			 * otherwise free this channel.
 			 */
-			if (!list_empty(&plchan->issued_list))
+			if (vchan_next_desc(&plchan->vc))
 				pl08x_start_next_txd(plchan);
 			else
 				pl08x_phy_free(plchan);
@@ -1807,8 +1779,6 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);

-		INIT_LIST_HEAD(&chan->pend_list);
-		INIT_LIST_HEAD(&chan->issued_list);
 		INIT_LIST_HEAD(&chan->done_list);
 		tasklet_init(&chan->tasklet, pl08x_tasklet,
 			     (unsigned long) chan);