aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorLaurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>2015-01-08 11:29:25 -0500
committerVinod Koul <vinod.koul@intel.com>2015-02-12 02:22:19 -0500
commita55e07c8a5aaf5442d10b0b392ce8ce41a96921d (patch)
treeac6213a61908009937b282c72d4c70d2d598fc43 /drivers/dma
parentbf44a4175e566c72ae2d01929f76a04a9e861e0d (diff)
dmaengine: rcar-dmac: Fix uninitialized variable usage
The desc variable is used uninitialized in the rcar_dmac_desc_get() and rcar_dmac_xfer_chunk_get() functions if descriptors need to be allocated. Fix it.

Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/sh/rcar-dmac.c69
1 file changed, 31 insertions, 38 deletions
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 29dd09ad41ff..8367578bac63 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -549,26 +549,22 @@ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
549 549
550 spin_lock_irq(&chan->lock); 550 spin_lock_irq(&chan->lock);
551 551
552 do { 552 while (list_empty(&chan->desc.free)) {
553 if (list_empty(&chan->desc.free)) { 553 /*
554 /* 554 * No free descriptors, allocate a page worth of them and try
555 * No free descriptors, allocate a page worth of them 555 * again, as someone else could race us to get the newly
556 * and try again, as someone else could race us to get 556 * allocated descriptors. If the allocation fails return an
557 * the newly allocated descriptors. If the allocation 557 * error.
558 * fails return an error. 558 */
559 */ 559 spin_unlock_irq(&chan->lock);
560 spin_unlock_irq(&chan->lock); 560 ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
561 ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT); 561 if (ret < 0)
562 if (ret < 0) 562 return NULL;
563 return NULL; 563 spin_lock_irq(&chan->lock);
564 spin_lock_irq(&chan->lock); 564 }
565 continue;
566 }
567 565
568 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, 566 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
569 node); 567 list_del(&desc->node);
570 list_del(&desc->node);
571 } while (!desc);
572 568
573 spin_unlock_irq(&chan->lock); 569 spin_unlock_irq(&chan->lock);
574 570
@@ -621,26 +617,23 @@ rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
621 617
622 spin_lock_irq(&chan->lock); 618 spin_lock_irq(&chan->lock);
623 619
624 do { 620 while (list_empty(&chan->desc.chunks_free)) {
625 if (list_empty(&chan->desc.chunks_free)) { 621 /*
626 /* 622 * No free descriptors, allocate a page worth of them and try
627 * No free descriptors, allocate a page worth of them 623 * again, as someone else could race us to get the newly
628 * and try again, as someone else could race us to get 624 * allocated descriptors. If the allocation fails return an
629 * the newly allocated descriptors. If the allocation 625 * error.
630 * fails return an error. 626 */
631 */ 627 spin_unlock_irq(&chan->lock);
632 spin_unlock_irq(&chan->lock); 628 ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
633 ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT); 629 if (ret < 0)
634 if (ret < 0) 630 return NULL;
635 return NULL; 631 spin_lock_irq(&chan->lock);
636 spin_lock_irq(&chan->lock); 632 }
637 continue;
638 }
639 633
640 chunk = list_first_entry(&chan->desc.chunks_free, 634 chunk = list_first_entry(&chan->desc.chunks_free,
641 struct rcar_dmac_xfer_chunk, node); 635 struct rcar_dmac_xfer_chunk, node);
642 list_del(&chunk->node); 636 list_del(&chunk->node);
643 } while (!chunk);
644 637
645 spin_unlock_irq(&chan->lock); 638 spin_unlock_irq(&chan->lock);
646 639