author	Vinod Koul <vinod.koul@intel.com>	2016-10-03 00:05:55 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2016-10-03 00:05:55 -0400
commit	f2469114c66158b36143a091255b4ed2d61fab7c (patch)
tree	82c88ef794cbad8e425ac17674c29d2a243e9bc8
parent	709c9464c335e1f8e22ee108def77d53330a95c7 (diff)
parent	4d8673a0494a0f20bead7aea7dc5906c74451554 (diff)
Merge branch 'topic/ste_dma40' into for-linus
-rw-r--r--	drivers/dma/ste_dma40.c	253
1 file changed, 122 insertions, 131 deletions
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 08f3d7be2df0..8684d11b29bb 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -874,7 +874,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 	}
 
 	if (curr_lcla < 0)
-		goto out;
+		goto set_current;
 
 	for (; lli_current < lli_len; lli_current++) {
 		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
@@ -925,8 +925,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 			break;
 		}
 	}
-
-out:
+ set_current:
 	desc->lli_current = lli_current;
 }
 
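
The renames above, and throughout this merge, follow the kernel convention that a goto label should name the action taken at the label (set_current, unlock, free_desc) rather than where the jump came from (out, err). A minimal userspace sketch of the style, with hypothetical names, not code from this driver:

#include <stdio.h>
#include <stdlib.h>

/*
 * Each label names the cleanup performed at it, so every "goto"
 * documents which resources are released on that failure path.
 */
static int setup(void)
{
	char *buf;
	FILE *f;

	buf = malloc(64);
	if (!buf)
		return -1;

	f = fopen("/tmp/example", "w");
	if (!f)
		goto free_buf;	/* says what happens, unlike "err" */

	if (fputs("hello\n", f) == EOF)
		goto close_file;

	fclose(f);
	free(buf);
	return 0;

close_file:
	fclose(f);
free_buf:
	free(buf);
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}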
@@ -1057,7 +1056,7 @@ static int __d40_execute_command_phy(struct d40_chan *d40c,
 			D40_CHAN_POS(d40c->phy_chan->num);
 
 		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
-			goto done;
+			goto unlock;
 	}
 
 	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
@@ -1093,7 +1092,7 @@ static int __d40_execute_command_phy(struct d40_chan *d40c,
 		}
 
 	}
-done:
+ unlock:
 	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
 	return ret;
 }
@@ -1580,7 +1579,7 @@ static void dma_tasklet(unsigned long data)
 		/* Check if we have reached here for cyclic job */
 		d40d = d40_first_active_get(d40c);
 		if (d40d == NULL || !d40d->cyclic)
-			goto err;
+			goto check_pending_tx;
 	}
 
 	if (!d40d->cyclic)
@@ -1622,8 +1621,7 @@ static void dma_tasklet(unsigned long data)
 	dmaengine_desc_callback_invoke(&cb, NULL);
 
 	return;
-
-err:
+ check_pending_tx:
 	/* Rescue manouver if receiving double interrupts */
 	if (d40c->pending_tx > 0)
 		d40c->pending_tx--;
@@ -1752,42 +1750,40 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
 		    phy->allocated_dst == D40_ALLOC_FREE) {
 			phy->allocated_dst = D40_ALLOC_PHY;
 			phy->allocated_src = D40_ALLOC_PHY;
-			goto found;
+			goto found_unlock;
 		} else
-			goto not_found;
+			goto not_found_unlock;
 	}
 
 	/* Logical channel */
 	if (is_src) {
 		if (phy->allocated_src == D40_ALLOC_PHY)
-			goto not_found;
+			goto not_found_unlock;
 
 		if (phy->allocated_src == D40_ALLOC_FREE)
 			phy->allocated_src = D40_ALLOC_LOG_FREE;
 
 		if (!(phy->allocated_src & BIT(log_event_line))) {
 			phy->allocated_src |= BIT(log_event_line);
-			goto found;
+			goto found_unlock;
 		} else
-			goto not_found;
+			goto not_found_unlock;
 	} else {
 		if (phy->allocated_dst == D40_ALLOC_PHY)
-			goto not_found;
+			goto not_found_unlock;
 
 		if (phy->allocated_dst == D40_ALLOC_FREE)
 			phy->allocated_dst = D40_ALLOC_LOG_FREE;
 
 		if (!(phy->allocated_dst & BIT(log_event_line))) {
 			phy->allocated_dst |= BIT(log_event_line);
-			goto found;
-		} else
-			goto not_found;
+			goto found_unlock;
+		}
 	}
-
-not_found:
+ not_found_unlock:
 	spin_unlock_irqrestore(&phy->lock, flags);
 	return false;
-found:
+ found_unlock:
 	spin_unlock_irqrestore(&phy->lock, flags);
 	return true;
 }
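
In d40_alloc_mask_set both exits release phy->lock, and the new found_unlock/not_found_unlock names make that visible at every goto site. The same shape with a POSIX mutex, as a sketch (try_claim and its arguments are invented for illustration):

#include <pthread.h>
#include <stdbool.h>

/*
 * Both exit labels drop the lock, mirroring found_unlock and
 * not_found_unlock above: no return path can leak the mutex.
 */
static bool try_claim(pthread_mutex_t *lock, bool *claimed)
{
	pthread_mutex_lock(lock);

	if (*claimed)
		goto not_found_unlock;

	*claimed = true;
	goto found_unlock;

not_found_unlock:
	pthread_mutex_unlock(lock);
	return false;
found_unlock:
	pthread_mutex_unlock(lock);
	return true;
}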
@@ -1803,7 +1799,7 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
 		phy->allocated_dst = D40_ALLOC_FREE;
 		phy->allocated_src = D40_ALLOC_FREE;
 		is_free = true;
-		goto out;
+		goto unlock;
 	}
 
 	/* Logical channel */
@@ -1819,8 +1815,7 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
 
 	is_free = ((phy->allocated_src | phy->allocated_dst) ==
 		   D40_ALLOC_FREE);
-
-out:
+ unlock:
 	spin_unlock_irqrestore(&phy->lock, flags);
 
 	return is_free;
@@ -2019,7 +2014,7 @@ static int d40_free_dma(struct d40_chan *d40c)
 	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
 	if (res) {
 		chan_err(d40c, "stop failed\n");
-		goto out;
+		goto mark_last_busy;
 	}
 
 	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
@@ -2037,8 +2032,7 @@ static int d40_free_dma(struct d40_chan *d40c)
 	d40c->busy = false;
 	d40c->phy_chan = NULL;
 	d40c->configured = false;
-out:
-
+ mark_last_busy:
 	pm_runtime_mark_last_busy(d40c->base->dev);
 	pm_runtime_put_autosuspend(d40c->base->dev);
 	return res;
@@ -2066,8 +2060,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
 			D40_CHAN_POS(d40c->phy_chan->num);
 		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
 			is_paused = true;
-
-		goto _exit;
+		goto unlock;
 	}
 
 	if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
@@ -2077,7 +2070,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
 		status = readl(chanbase + D40_CHAN_REG_SSLNK);
 	} else {
 		chan_err(d40c, "Unknown direction\n");
-		goto _exit;
+		goto unlock;
 	}
 
 	status = (status & D40_EVENTLINE_MASK(event)) >>
@@ -2085,7 +2078,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
 
 	if (status != D40_DMA_RUN)
 		is_paused = true;
-_exit:
+ unlock:
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return is_paused;
 
@@ -2170,7 +2163,7 @@ static struct d40_desc *
 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
 	      unsigned int sg_len, unsigned long dma_flags)
 {
-	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+	struct stedma40_chan_cfg *cfg;
 	struct d40_desc *desc;
 	int ret;
 
@@ -2178,17 +2171,18 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
 	if (!desc)
 		return NULL;
 
+	cfg = &chan->dma_cfg;
 	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
 					cfg->dst_info.data_width);
 	if (desc->lli_len < 0) {
 		chan_err(chan, "Unaligned size\n");
-		goto err;
+		goto free_desc;
 	}
 
 	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
 	if (ret < 0) {
 		chan_err(chan, "Could not allocate lli\n");
-		goto err;
+		goto free_desc;
 	}
 
 	desc->lli_current = 0;
@@ -2198,8 +2192,7 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
 	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
 
 	return desc;
-
-err:
+ free_desc:
 	d40_desc_free(chan, desc);
 	return NULL;
 }
@@ -2210,8 +2203,8 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	    enum dma_transfer_direction direction, unsigned long dma_flags)
 {
 	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
-	dma_addr_t src_dev_addr = 0;
-	dma_addr_t dst_dev_addr = 0;
+	dma_addr_t src_dev_addr;
+	dma_addr_t dst_dev_addr;
 	struct d40_desc *desc;
 	unsigned long flags;
 	int ret;
@@ -2225,11 +2218,13 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 
 	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
 	if (desc == NULL)
-		goto err;
+		goto unlock;
 
 	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
 		desc->cyclic = true;
 
+	src_dev_addr = 0;
+	dst_dev_addr = 0;
 	if (direction == DMA_DEV_TO_MEM)
 		src_dev_addr = chan->runtime_addr;
 	else if (direction == DMA_MEM_TO_DEV)
@@ -2245,7 +2240,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	if (ret) {
 		chan_err(chan, "Failed to prepare %s sg job: %d\n",
 			 chan_is_logical(chan) ? "log" : "phy", ret);
-		goto err;
+		goto free_desc;
 	}
 
 	/*
@@ -2257,10 +2252,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return &desc->txd;
-
-err:
-	if (desc)
-		d40_desc_free(chan, desc);
+ free_desc:
+	d40_desc_free(chan, desc);
+ unlock:
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return NULL;
 }
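
The free_desc/unlock pair in d40_prep_sg also removes the old "if (desc)" test: a failure before the descriptor exists now jumps straight to unlock, and only later failures pass through free_desc, which falls through into unlock. A sketch of that fall-through ladder, with hypothetical names:

#include <stdlib.h>
#include <pthread.h>

struct desc { int len; };

/*
 * Failures before the allocation jump to "unlock"; failures after
 * it take "free_desc", which falls through into "unlock".
 */
static struct desc *prepare(pthread_mutex_t *lock, int fail_late)
{
	struct desc *d;

	pthread_mutex_lock(lock);

	d = malloc(sizeof(*d));
	if (!d)
		goto unlock;	/* nothing to free yet */

	if (fail_late)
		goto free_desc;	/* undo the allocation first */

	pthread_mutex_unlock(lock);
	return d;

free_desc:
	free(d);
unlock:
	pthread_mutex_unlock(lock);
	return NULL;
}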
@@ -2398,7 +2392,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 		err = d40_config_memcpy(d40c);
 		if (err) {
 			chan_err(d40c, "Failed to configure memcpy channel\n");
-			goto fail;
+			goto mark_last_busy;
 		}
 	}
 
@@ -2406,7 +2400,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	if (err) {
 		chan_err(d40c, "Failed to allocate channel\n");
 		d40c->configured = false;
-		goto fail;
+		goto mark_last_busy;
 	}
 
 	pm_runtime_get_sync(d40c->base->dev);
@@ -2440,7 +2434,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	 */
 	if (is_free_phy)
 		d40_config_write(d40c);
-fail:
+ mark_last_busy:
 	pm_runtime_mark_last_busy(d40c->base->dev);
 	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
@@ -2863,7 +2857,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 
 	if (err) {
 		d40_err(base->dev, "Failed to register slave channels\n");
-		goto failure1;
+		goto exit;
 	}
 
 	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
@@ -2880,7 +2874,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	if (err) {
 		d40_err(base->dev,
 			"Failed to register memcpy only channels\n");
-		goto failure2;
+		goto unregister_slave;
 	}
 
 	d40_chan_init(base, &base->dma_both, base->phy_chans,
@@ -2898,14 +2892,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	if (err) {
 		d40_err(base->dev,
 			"Failed to register logical and physical capable channels\n");
-		goto failure3;
+		goto unregister_memcpy;
 	}
 	return 0;
-failure3:
+ unregister_memcpy:
 	dma_async_device_unregister(&base->dma_memcpy);
-failure2:
+ unregister_slave:
 	dma_async_device_unregister(&base->dma_slave);
-failure1:
+ exit:
 	return err;
 }
 
@@ -3116,11 +3110,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 {
 	struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
-	struct clk *clk = NULL;
-	void __iomem *virtbase = NULL;
-	struct resource *res = NULL;
-	struct d40_base *base = NULL;
-	int num_log_chans = 0;
+	struct clk *clk;
+	void __iomem *virtbase;
+	struct resource *res;
+	struct d40_base *base;
+	int num_log_chans;
 	int num_phy_chans;
 	int num_memcpy_chans;
 	int clk_ret = -EINVAL;
@@ -3132,27 +3126,27 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(clk)) {
 		d40_err(&pdev->dev, "No matching clock found\n");
-		goto failure;
+		goto check_prepare_enabled;
 	}
 
 	clk_ret = clk_prepare_enable(clk);
 	if (clk_ret) {
 		d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
-		goto failure;
+		goto disable_unprepare;
 	}
 
 	/* Get IO for DMAC base address */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
 	if (!res)
-		goto failure;
+		goto disable_unprepare;
 
 	if (request_mem_region(res->start, resource_size(res),
 			       D40_NAME " I/O base") == NULL)
-		goto failure;
+		goto release_region;
 
 	virtbase = ioremap(res->start, resource_size(res));
 	if (!virtbase)
-		goto failure;
+		goto release_region;
 
 	/* This is just a regular AMBA PrimeCell ID actually */
 	for (pid = 0, i = 0; i < 4; i++)
@@ -3164,13 +3158,13 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 
 	if (cid != AMBA_CID) {
 		d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
-		goto failure;
+		goto unmap_io;
 	}
 	if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
 		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
 			AMBA_MANF_BITS(pid),
 			AMBA_VENDOR_ST);
-		goto failure;
+		goto unmap_io;
 	}
 	/*
 	 * HW revision:
@@ -3184,7 +3178,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	rev = AMBA_REV_BITS(pid);
 	if (rev < 2) {
 		d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
-		goto failure;
+		goto unmap_io;
 	}
 
 	/* The number of physical channels on this HW */
@@ -3210,7 +3204,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		       sizeof(struct d40_chan), GFP_KERNEL);
 
 	if (base == NULL)
-		goto failure;
+		goto unmap_io;
 
 	base->rev = rev;
 	base->clk = clk;
@@ -3255,65 +3249,66 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
 	}
 
-	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
+	base->phy_res = kcalloc(num_phy_chans,
+				sizeof(*base->phy_res),
 				GFP_KERNEL);
 	if (!base->phy_res)
-		goto failure;
+		goto free_base;
 
-	base->lookup_phy_chans = kzalloc(num_phy_chans *
-					 sizeof(struct d40_chan *),
+	base->lookup_phy_chans = kcalloc(num_phy_chans,
+					 sizeof(*base->lookup_phy_chans),
 					 GFP_KERNEL);
 	if (!base->lookup_phy_chans)
-		goto failure;
+		goto free_phy_res;
 
-	base->lookup_log_chans = kzalloc(num_log_chans *
-					 sizeof(struct d40_chan *),
+	base->lookup_log_chans = kcalloc(num_log_chans,
+					 sizeof(*base->lookup_log_chans),
 					 GFP_KERNEL);
 	if (!base->lookup_log_chans)
-		goto failure;
+		goto free_phy_chans;
 
-	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
-					    sizeof(d40_backup_regs_chan),
-					    GFP_KERNEL);
+	base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
+						  sizeof(d40_backup_regs_chan),
+						  GFP_KERNEL);
 	if (!base->reg_val_backup_chan)
-		goto failure;
+		goto free_log_chans;
 
-	base->lcla_pool.alloc_map =
-		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
-			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
+	base->lcla_pool.alloc_map = kcalloc(num_phy_chans
+					    * D40_LCLA_LINK_PER_EVENT_GRP,
+					    sizeof(*base->lcla_pool.alloc_map),
+					    GFP_KERNEL);
 	if (!base->lcla_pool.alloc_map)
-		goto failure;
+		goto free_backup_chan;
 
 	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
 					    0, SLAB_HWCACHE_ALIGN,
 					    NULL);
 	if (base->desc_slab == NULL)
-		goto failure;
+		goto free_map;
 
 	return base;
-
-failure:
+ free_map:
+	kfree(base->lcla_pool.alloc_map);
+ free_backup_chan:
+	kfree(base->reg_val_backup_chan);
+ free_log_chans:
+	kfree(base->lookup_log_chans);
+ free_phy_chans:
+	kfree(base->lookup_phy_chans);
+ free_phy_res:
+	kfree(base->phy_res);
+ free_base:
+	kfree(base);
+ unmap_io:
+	iounmap(virtbase);
+ release_region:
+	release_mem_region(res->start, resource_size(res));
+ check_prepare_enabled:
 	if (!clk_ret)
+ disable_unprepare:
 		clk_disable_unprepare(clk);
 	if (!IS_ERR(clk))
 		clk_put(clk);
-	if (virtbase)
-		iounmap(virtbase);
-	if (res)
-		release_mem_region(res->start,
-				   resource_size(res));
-	if (virtbase)
-		iounmap(virtbase);
-
-	if (base) {
-		kfree(base->lcla_pool.alloc_map);
-		kfree(base->reg_val_backup_chan);
-		kfree(base->lookup_log_chans);
-		kfree(base->lookup_phy_chans);
-		kfree(base->phy_res);
-		kfree(base);
-	}
-
 	return NULL;
 }
 
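
Beyond the label renames, this hunk replaces open-coded kzalloc(n * size, ...) calls with kcalloc()/kmalloc_array(), which fail instead of wrapping when n * size overflows, and takes the element size from the destination pointer (sizeof(*base->phy_res)). The standard-C analogue of the overflow-safe form, as a sketch (kcalloc additionally zeroes the memory, like calloc):

#include <stdlib.h>

/*
 * calloc(n, size) must fail when n * size would overflow, and it
 * zeroes the block; malloc(n * size) silently wraps and returns a
 * buffer that is too small.
 */
static int *alloc_table(size_t n)
{
	return calloc(n, sizeof(int));	/* overflow-checked */
}

static int *alloc_table_risky(size_t n)
{
	return malloc(n * sizeof(int));	/* may wrap for huge n */
}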
@@ -3376,20 +3371,18 @@ static int __init d40_lcla_allocate(struct d40_base *base)
 	struct d40_lcla_pool *pool = &base->lcla_pool;
 	unsigned long *page_list;
 	int i, j;
-	int ret = 0;
+	int ret;
 
 	/*
 	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned,
 	 * To full fill this hardware requirement without wasting 256 kb
 	 * we allocate pages until we get an aligned one.
 	 */
-	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
-			    GFP_KERNEL);
-
-	if (!page_list) {
-		ret = -ENOMEM;
-		goto failure;
-	}
+	page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
+				  sizeof(*page_list),
+				  GFP_KERNEL);
+	if (!page_list)
+		return -ENOMEM;
 
 	/* Calculating how many pages that are required */
 	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
@@ -3405,7 +3398,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
 
 			for (j = 0; j < i; j++)
 				free_pages(page_list[j], base->lcla_pool.pages);
-			goto failure;
+			goto free_page_list;
 		}
 
 		if ((virt_to_phys((void *)page_list[i]) &
@@ -3432,7 +3425,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
 					 GFP_KERNEL);
 		if (!base->lcla_pool.base_unaligned) {
 			ret = -ENOMEM;
-			goto failure;
+			goto free_page_list;
 		}
 
 		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
@@ -3445,12 +3438,13 @@ static int __init d40_lcla_allocate(struct d40_base *base)
 	if (dma_mapping_error(base->dev, pool->dma_addr)) {
 		pool->dma_addr = 0;
 		ret = -ENOMEM;
-		goto failure;
+		goto free_page_list;
 	}
 
 	writel(virt_to_phys(base->lcla_pool.base),
 	       base->virtbase + D40_DREG_LCLA);
-failure:
+	ret = 0;
+ free_page_list:
 	kfree(page_list);
 	return ret;
 }
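
d40_lcla_allocate now declares ret without an initializer and sets it to 0 only on the success path, immediately before falling through to free_page_list, since the page_list scratch buffer must be freed on success and failure alike. A sketch of a cleanup label shared by both outcomes, with invented names:

#include <stdlib.h>
#include <string.h>

/*
 * The scratch buffer is freed whether build() succeeds or fails,
 * so the success path sets ret and falls through to the cleanup.
 */
static int build(const char *src)
{
	int ret;
	char *scratch;

	scratch = malloc(128);
	if (!scratch)
		return -1;

	if (strlen(src) >= 128) {
		ret = -1;
		goto free_scratch;
	}

	ret = 0;	/* success also reaches the label below */
free_scratch:
	free(scratch);
	return ret;
}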
@@ -3462,9 +3456,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
 	int num_phy = 0, num_memcpy = 0, num_disabled = 0;
 	const __be32 *list;
 
-	pdata = devm_kzalloc(&pdev->dev,
-			     sizeof(struct stedma40_platform_data),
-			     GFP_KERNEL);
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
 
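
The devm_kzalloc() call above is collapsed to one line and sized with sizeof(*pdata) rather than sizeof(struct stedma40_platform_data), so the request stays correct even if the pointer's type is later renamed. The idiom in plain C, as a sketch:

#include <stdlib.h>

struct platform_data { int num_channels; };

static struct platform_data *alloc_pdata(void)
{
	struct platform_data *pdata;

	/* sizeof(*pdata) tracks the pointee type automatically */
	pdata = calloc(1, sizeof(*pdata));
	return pdata;
}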
@@ -3546,7 +3538,7 @@ static int __init d40_probe(struct platform_device *pdev)
 	if (!res) {
 		ret = -ENOENT;
 		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
-		goto failure;
+		goto destroy_cache;
 	}
 	base->lcpa_size = resource_size(res);
 	base->phy_lcpa = res->start;
@@ -3555,7 +3547,7 @@ static int __init d40_probe(struct platform_device *pdev)
 			       D40_NAME " I/O lcpa") == NULL) {
 		ret = -EBUSY;
 		d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
-		goto failure;
+		goto destroy_cache;
 	}
 
 	/* We make use of ESRAM memory for this. */
@@ -3571,7 +3563,7 @@ static int __init d40_probe(struct platform_device *pdev)
 	if (!base->lcpa_base) {
 		ret = -ENOMEM;
 		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
-		goto failure;
+		goto destroy_cache;
 	}
 	/* If lcla has to be located in ESRAM we don't need to allocate */
 	if (base->plat_data->use_esram_lcla) {
@@ -3581,14 +3573,14 @@ static int __init d40_probe(struct platform_device *pdev)
 			ret = -ENOENT;
 			d40_err(&pdev->dev,
 				"No \"lcla_esram\" memory resource\n");
-			goto failure;
+			goto destroy_cache;
 		}
 		base->lcla_pool.base = ioremap(res->start,
 					       resource_size(res));
 		if (!base->lcla_pool.base) {
 			ret = -ENOMEM;
 			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
-			goto failure;
+			goto destroy_cache;
 		}
 		writel(res->start, base->virtbase + D40_DREG_LCLA);
 
@@ -3596,7 +3588,7 @@ static int __init d40_probe(struct platform_device *pdev)
 		ret = d40_lcla_allocate(base);
 		if (ret) {
 			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
-			goto failure;
+			goto destroy_cache;
 		}
 	}
 
@@ -3607,7 +3599,7 @@ static int __init d40_probe(struct platform_device *pdev)
 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
 	if (ret) {
 		d40_err(&pdev->dev, "No IRQ defined\n");
-		goto failure;
+		goto destroy_cache;
 	}
 
 	if (base->plat_data->use_esram_lcla) {
@@ -3617,7 +3609,7 @@ static int __init d40_probe(struct platform_device *pdev)
 			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
 			ret = PTR_ERR(base->lcpa_regulator);
 			base->lcpa_regulator = NULL;
-			goto failure;
+			goto destroy_cache;
 		}
 
 		ret = regulator_enable(base->lcpa_regulator);
@@ -3626,7 +3618,7 @@ static int __init d40_probe(struct platform_device *pdev)
 				"Failed to enable lcpa_regulator\n");
 			regulator_put(base->lcpa_regulator);
 			base->lcpa_regulator = NULL;
-			goto failure;
+			goto destroy_cache;
 		}
 	}
 
@@ -3641,13 +3633,13 @@ static int __init d40_probe(struct platform_device *pdev)
 
 	ret = d40_dmaengine_init(base, num_reserved_chans);
 	if (ret)
-		goto failure;
+		goto destroy_cache;
 
 	base->dev->dma_parms = &base->dma_parms;
 	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
 	if (ret) {
 		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
-		goto failure;
+		goto destroy_cache;
 	}
 
 	d40_hw_init(base);
@@ -3661,8 +3653,7 @@ static int __init d40_probe(struct platform_device *pdev)
 
 	dev_info(base->dev, "initialized\n");
 	return 0;
-
-failure:
+ destroy_cache:
 	kmem_cache_destroy(base->desc_slab);
 	if (base->virtbase)
 		iounmap(base->virtbase);
@@ -3704,7 +3695,7 @@ failure:
 	kfree(base->lookup_phy_chans);
 	kfree(base->phy_res);
 	kfree(base);
-report_failure:
+ report_failure:
 	d40_err(&pdev->dev, "probe failed\n");
 	return ret;
 }