author     Jonas Aaberg <jonas.aberg@stericsson.com>    2010-06-20 17:25:31 -0400
committer  Dan Williams <dan.j.williams@intel.com>      2010-06-22 21:01:54 -0400
commit     0d0f6b8bbb13e1287f4f2a271e4f4306789511a2
tree       b1ed6b7293cdb16a658b18ce3640bb2354cdaca3
parent     2a6143407d9114a0c5d16a7eed1a0892a4ce9f19
DMAENGINE: ste_dma40: deny ops on non-alloc channels
Added checks to deny operations on non-allocated channels.
Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
 drivers/dma/ste_dma40.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+), 0 deletions(-)
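Every hunk in the diff below adds the same guard: test d40c->phy_chan before taking the channel lock, log via dev_err(), and bail out with -EINVAL (or ERR_PTR(-EINVAL) where a descriptor pointer is expected), since phy_chan is only set when the channel has been allocated. A minimal userspace sketch of that guard follows; the helper name d40_validate_chan is hypothetical, fprintf() stands in for dev_err(), and the structs are reduced to the one field the check needs:

        /*
         * Sketch only, not the driver source: a NULL phy_chan means the
         * channel was never allocated, so the operation is refused before
         * any lock is taken or hardware is touched.
         */
        #include <stdio.h>
        #include <stddef.h>
        #include <errno.h>

        struct d40_phy_res;                     /* opaque physical channel */

        struct d40_chan {
                struct d40_phy_res *phy_chan;   /* NULL until allocation */
        };

        static int d40_validate_chan(struct d40_chan *d40c, const char *func)
        {
                if (d40c->phy_chan == NULL) {
                        fprintf(stderr, "[%s] Channel is not allocated!\n", func);
                        return -EINVAL; /* or ERR_PTR(-EINVAL) in prep hooks */
                }
                return 0;
        }

        int main(void)
        {
                struct d40_chan unallocated = { .phy_chan = NULL };

                /* The guard fires and the "operation" is denied. */
                return d40_validate_chan(&unallocated, __func__) ? 1 : 0;
        }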
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1d176642e523..4d56d214fa05 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1515,6 +1515,12 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
                                                    chan);
        unsigned long flags;
 
+       if (d40c->phy_chan == NULL) {
+               dev_err(&d40c->chan.dev->device,
+                       "[%s] Unallocated channel.\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
        spin_lock_irqsave(&d40c->lock, flags);
        d40d = d40_desc_get(d40c);
 
@@ -1710,6 +1716,13 @@ static void d40_free_chan_resources(struct dma_chan *chan)
        int err;
        unsigned long flags;
 
+       if (d40c->phy_chan == NULL) {
+               dev_err(&d40c->chan.dev->device,
+                       "[%s] Cannot free unallocated channel\n", __func__);
+               return;
+       }
+
+
        spin_lock_irqsave(&d40c->lock, flags);
 
        err = d40_free_dma(d40c);
@@ -1732,6 +1745,12 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
        unsigned long flags;
        int err = 0;
 
+       if (d40c->phy_chan == NULL) {
+               dev_err(&d40c->chan.dev->device,
+                       "[%s] Channel is not allocated.\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
        spin_lock_irqsave(&d40c->lock, flags);
        d40d = d40_desc_get(d40c);
 
@@ -1947,6 +1966,12 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
        unsigned long flags;
        int err;
 
+       if (d40c->phy_chan == NULL) {
+               dev_err(&d40c->chan.dev->device,
+                       "[%s] Cannot prepare unallocated channel\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
        if (d40c->dma_cfg.pre_transfer)
                d40c->dma_cfg.pre_transfer(chan,
                                           d40c->dma_cfg.pre_transfer_data,
@@ -1993,6 +2018,13 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
        dma_cookie_t last_complete;
        int ret;
 
+       if (d40c->phy_chan == NULL) {
+               dev_err(&d40c->chan.dev->device,
+                       "[%s] Cannot read status of unallocated channel\n",
+                       __func__);
+               return -EINVAL;
+       }
+
        last_complete = d40c->completed;
        last_used = chan->cookie;
 
@@ -2012,6 +2044,12 @@ static void d40_issue_pending(struct dma_chan *chan)
        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        unsigned long flags;
 
+       if (d40c->phy_chan == NULL) {
+               dev_err(&d40c->chan.dev->device,
+                       "[%s] Channel is not allocated!\n", __func__);
+               return;
+       }
+
        spin_lock_irqsave(&d40c->lock, flags);
 
        /* Busy means that pending jobs are already being processed */
@@ -2027,6 +2065,12 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long flags;
        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 
+       if (d40c->phy_chan == NULL) {
+               dev_err(&d40c->chan.dev->device,
+                       "[%s] Channel is not allocated!\n", __func__);
+               return -EINVAL;
+       }
+
        switch (cmd) {
        case DMA_TERMINATE_ALL:
                spin_lock_irqsave(&d40c->lock, flags);
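Because the prep hooks above fail with ERR_PTR(-EINVAL) rather than plain NULL, a dmaengine client has to test the returned descriptor with IS_ERR() as well as against NULL. A hypothetical caller fragment to illustrate this (submit_copy and its parameters are not part of this patch; dst and src are assumed already DMA-mapped):

        #include <linux/dmaengine.h>
        #include <linux/err.h>

        /* Hypothetical client helper, not from this patch. */
        static int submit_copy(struct dma_chan *chan, dma_addr_t dst,
                               dma_addr_t src, size_t len)
        {
                struct dma_async_tx_descriptor *txd;
                dma_cookie_t cookie;

                txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
                if (IS_ERR_OR_NULL(txd)) /* ERR_PTR(-EINVAL) from the guard, or NULL */
                        return txd ? PTR_ERR(txd) : -ENOMEM;

                cookie = txd->tx_submit(txd);
                if (dma_submit_error(cookie))
                        return -EIO;

                dma_async_issue_pending(chan);
                return 0;
        }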