aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
author	Rabin Vincent <rabin.vincent@stericsson.com>	2010-10-06 04:20:38 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2010-10-07 17:54:55 -0400
commit	819504f4861a5bc1e25c82409956388bb79fb635 (patch)
tree	6adfb3ab04da0dfa87d677502fa902c93991b2b5 /drivers/dma
parent	a2c15fa4c122558472f8041515072218c8652c7e (diff)
DMAENGINE: ste_dma40: fix resource leaks in error paths.
Fix some leaks of allocated descriptors in error paths.

Acked-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Rabin Vincent <rabin.vincent@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/ste_dma40.c	17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 554e2942667c..c07d989f26d7 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1728,6 +1728,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1728 1728
1729 return &d40d->txd; 1729 return &d40d->txd;
1730err: 1730err:
1731 if (d40d)
1732 d40_desc_free(d40c, d40d);
1731 spin_unlock_irqrestore(&d40c->lock, flags); 1733 spin_unlock_irqrestore(&d40c->lock, flags);
1732 return NULL; 1734 return NULL;
1733} 1735}
@@ -1939,8 +1941,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1939err_fill_lli: 1941err_fill_lli:
1940 dev_err(&d40c->chan.dev->device, 1942 dev_err(&d40c->chan.dev->device,
1941 "[%s] Failed filling in PHY LLI\n", __func__); 1943 "[%s] Failed filling in PHY LLI\n", __func__);
1942 d40_pool_lli_free(d40d);
1943err: 1944err:
1945 if (d40d)
1946 d40_desc_free(d40c, d40d);
1944 spin_unlock_irqrestore(&d40c->lock, flags); 1947 spin_unlock_irqrestore(&d40c->lock, flags);
1945 return NULL; 1948 return NULL;
1946} 1949}
@@ -2079,10 +2082,9 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2079 2082
2080 spin_lock_irqsave(&d40c->lock, flags); 2083 spin_lock_irqsave(&d40c->lock, flags);
2081 d40d = d40_desc_get(d40c); 2084 d40d = d40_desc_get(d40c);
2082 spin_unlock_irqrestore(&d40c->lock, flags);
2083 2085
2084 if (d40d == NULL) 2086 if (d40d == NULL)
2085 return NULL; 2087 goto err;
2086 2088
2087 if (d40c->log_num != D40_PHY_CHAN) 2089 if (d40c->log_num != D40_PHY_CHAN)
2088 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, 2090 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
@@ -2095,7 +2097,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2095 "[%s] Failed to prepare %s slave sg job: %d\n", 2097 "[%s] Failed to prepare %s slave sg job: %d\n",
2096 __func__, 2098 __func__,
2097 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); 2099 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2098 return NULL; 2100 goto err;
2099 } 2101 }
2100 2102
2101 d40d->txd.flags = dma_flags; 2103 d40d->txd.flags = dma_flags;
@@ -2104,7 +2106,14 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2104 2106
2105 d40d->txd.tx_submit = d40_tx_submit; 2107 d40d->txd.tx_submit = d40_tx_submit;
2106 2108
2109 spin_unlock_irqrestore(&d40c->lock, flags);
2107 return &d40d->txd; 2110 return &d40d->txd;
2111
2112err:
2113 if (d40d)
2114 d40_desc_free(d40c, d40d);
2115 spin_unlock_irqrestore(&d40c->lock, flags);
2116 return NULL;
2108} 2117}
2109 2118
2110static enum dma_status d40_tx_status(struct dma_chan *chan, 2119static enum dma_status d40_tx_status(struct dma_chan *chan,