aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorLaurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>2014-07-30 20:34:06 -0400
committerVinod Koul <vinod.koul@intel.com>2014-07-31 07:45:45 -0400
commit4415b03abb0aacd937010f13310b7fa437b9ad7d (patch)
treec58ffe76c1a538e6da5c553da5c3e463ddcf62b0 /drivers/dma
parentc091ff51b4d2543b828d53ce47f66905dee870fd (diff)
dmaengine: shdma: Allocate cyclic sg list dynamically
The sg list used to prepare cyclic DMA descriptors is currently allocated statically on the stack as an array of 32 elements. This makes the shdma_prep_dma_cyclic() function consume a lot of stack space, as reported by the compiler: drivers/dma/sh/shdma-base.c: In function ‘shdma_prep_dma_cyclic’: drivers/dma/sh/shdma-base.c:715:1: warning: the frame size of 1056 bytes is larger than 1024 bytes [-Wframe-larger-than=] Given the limited Linux kernel stack size, this could lead to stack overflows. Fix the problem by allocating the sg list dynamically. Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com> Signed-off-by: Simon Horman <horms+renesas@verge.net.au> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/sh/shdma-base.c17
1 file changed, 15 insertions, 2 deletions
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 94b6bde6c86a..e427a03a0e8b 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -672,11 +672,12 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
672{ 672{
673 struct shdma_chan *schan = to_shdma_chan(chan); 673 struct shdma_chan *schan = to_shdma_chan(chan);
674 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 674 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
675 struct dma_async_tx_descriptor *desc;
675 const struct shdma_ops *ops = sdev->ops; 676 const struct shdma_ops *ops = sdev->ops;
676 unsigned int sg_len = buf_len / period_len; 677 unsigned int sg_len = buf_len / period_len;
677 int slave_id = schan->slave_id; 678 int slave_id = schan->slave_id;
678 dma_addr_t slave_addr; 679 dma_addr_t slave_addr;
679 struct scatterlist sgl[SHDMA_MAX_SG_LEN]; 680 struct scatterlist *sgl;
680 int i; 681 int i;
681 682
682 if (!chan) 683 if (!chan)
@@ -700,7 +701,16 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
700 701
701 slave_addr = ops->slave_addr(schan); 702 slave_addr = ops->slave_addr(schan);
702 703
704 /*
705	 * Allocate the sg list dynamically as it would consume too much stack
706 * space.
707 */
708 sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
709 if (!sgl)
710 return NULL;
711
703 sg_init_table(sgl, sg_len); 712 sg_init_table(sgl, sg_len);
713
704 for (i = 0; i < sg_len; i++) { 714 for (i = 0; i < sg_len; i++) {
705 dma_addr_t src = buf_addr + (period_len * i); 715 dma_addr_t src = buf_addr + (period_len * i);
706 716
@@ -710,8 +720,11 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
710 sg_dma_len(&sgl[i]) = period_len; 720 sg_dma_len(&sgl[i]) = period_len;
711 } 721 }
712 722
713 return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, 723 desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
714 direction, flags, true); 724 direction, flags, true);
725
726 kfree(sgl);
727 return desc;
715} 728}
716 729
717static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 730static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,