path: root/drivers/dma/sh/shdma-base.c
author	Guennadi Liakhovetski <g.liakhovetski@gmx.de>	2012-07-05 06:29:41 -0400
committer	Vinod Koul <vinod.koul@linux.intel.com>	2012-07-20 01:53:45 -0400
commit	c2cdb7e4d16394fc51dc5c2c5b3e7c3733bdfaac (patch)
tree	1e98394a20b33ed09d55b5e37a1c1ed2784721d6 /drivers/dma/sh/shdma-base.c
parent	ecf90fbbdc66cde6f5fa25d88541112b9baac459 (diff)
dma: sh: use an integer slave ID to improve API compatibility
Initially, struct shdma_slave was introduced with only one member - an unsigned slave ID - to describe common properties of DMA slaves in an extensible way. However, experience shows that a slave ID is in fact the only parameter needed to identify a DMA slave; it is also what the core dmaengine API uses in struct dma_slave_config. We therefore switch to using the slave_id directly, instead of passing a pointer to struct shdma_slave, to improve compatibility with the core. We also make the slave_id signed for easier error checking.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
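The convention the patch adopts is easy to show outside the kernel: a signed slave ID doubles as an "unbound"/error marker, which is what the diff below does with schan->slave_id = -EINVAL and the slave_id >= 0 checks. The following is a minimal standalone sketch of that idea only; the names (fake_chan, fake_bind_slave, SLAVE_NUM) are invented for illustration and are not part of the shdma API.

	/*
	 * Sketch of the signed slave-ID convention: a non-negative value
	 * names a DMA slave, a negative value (e.g. -EINVAL) means "no
	 * slave bound". All identifiers here are hypothetical.
	 */
	#include <errno.h>
	#include <stdio.h>

	#define SLAVE_NUM 8			/* hypothetical number of slave IDs */

	struct fake_chan {
		int slave_id;			/* signed: < 0 means unbound */
	};

	static int fake_bind_slave(struct fake_chan *c, int slave_id)
	{
		/* same range check the patch adds in shdma_alloc_chan_resources() */
		if (slave_id < 0 || slave_id >= SLAVE_NUM)
			return -EINVAL;
		c->slave_id = slave_id;
		return 0;
	}

	int main(void)
	{
		struct fake_chan c = { .slave_id = -EINVAL };	/* unbound by default */

		if (fake_bind_slave(&c, 3) == 0 && c.slave_id >= 0)
			printf("bound to slave %d\n", c.slave_id);
		return 0;
	}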
Diffstat (limited to 'drivers/dma/sh/shdma-base.c')
-rw-r--r--	drivers/dma/sh/shdma-base.c	25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index f75ebfa735c0..73db282a1436 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -76,7 +76,6 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
 		container_of(tx, struct shdma_desc, async_tx),
 		*last = desc;
 	struct shdma_chan *schan = to_shdma_chan(tx->chan);
-	struct shdma_slave *slave = schan->slave;
 	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;
 	bool power_up;
@@ -138,7 +137,7 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
 			 * Make it int then, on error remove chunks from the
 			 * queue again
 			 */
-			ops->setup_xfer(schan, slave);
+			ops->setup_xfer(schan, schan->slave_id);
 
 			if (schan->pm_state == SHDMA_PM_PENDING)
 				shdma_chan_xfer_ld_queue(schan);
@@ -186,7 +185,7 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
 	 * never runs concurrently with itself or free_chan_resources.
 	 */
 	if (slave) {
-		if (slave->slave_id >= slave_num) {
+		if (slave->slave_id < 0 || slave->slave_id >= slave_num) {
 			ret = -EINVAL;
 			goto evalid;
 		}
@@ -196,9 +195,13 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
 			goto etestused;
 		}
 
-		ret = ops->set_slave(schan, slave);
+		ret = ops->set_slave(schan, slave->slave_id);
 		if (ret < 0)
 			goto esetslave;
+
+		schan->slave_id = slave->slave_id;
+	} else {
+		schan->slave_id = -EINVAL;
 	}
 
 	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
@@ -208,7 +211,6 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
 		goto edescalloc;
 	}
 	schan->desc_num = NR_DESCS_PER_CHANNEL;
-	schan->slave = slave;
 
 	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
 		desc = ops->embedded_desc(schan->desc, i);
@@ -366,10 +368,9 @@ static void shdma_free_chan_resources(struct dma_chan *chan)
 	if (!list_empty(&schan->ld_queue))
 		shdma_chan_ld_cleanup(schan, true);
 
-	if (schan->slave) {
+	if (schan->slave_id >= 0) {
 		/* The caller is holding dma_list_mutex */
-		struct shdma_slave *slave = schan->slave;
-		clear_bit(slave->slave_id, shdma_slave_used);
+		clear_bit(schan->slave_id, shdma_slave_used);
 		chan->private = NULL;
 	}
 
@@ -559,7 +560,7 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
 	struct shdma_chan *schan = to_shdma_chan(chan);
 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
 	const struct shdma_ops *ops = sdev->ops;
-	struct shdma_slave *slave = schan->slave;
+	int slave_id = schan->slave_id;
 	dma_addr_t slave_addr;
 
 	if (!chan)
@@ -568,9 +569,9 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
 	BUG_ON(!schan->desc_num);
 
 	/* Someone calling slave DMA on a generic channel? */
-	if (!slave || !sg_len) {
-		dev_warn(schan->dev, "%s: bad parameter: %p, %d, %d\n",
-			 __func__, slave, sg_len, slave ? slave->slave_id : -1);
+	if (slave_id < 0 || !sg_len) {
+		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
+			 __func__, sg_len, slave_id);
 		return NULL;
 	}
 