author    Linus Walleij <linus.walleij@stericsson.com>    2010-06-22 21:06:42 -0400
committer Dan Williams <dan.j.williams@intel.com>         2010-06-22 21:06:42 -0400
commit    f41855929c9fdc3b4f2863ada9df3e0cf4231b5b
tree      4a31c0bd50af7c13664352e929edcdd2bd5bbf26 /drivers/dma
parent    6b7acd84426235c63a3c0f0b230a95064f97b0d4
DMAENGINE: ste_dma40: support older silicon
This makes sure the DMA40 driver will also work on the oldest silicon
revisions, which have the on-chip memory at a different location in the
DB8500 and also require an explicit suspend before starting or resuming
a logical channel.

Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
[added parenthesis to the definition of U8500_DMA_LCPA_BASE_ED]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
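The drivers/dma part of the change (the only part in the diffstat below) covers the second point: on revision 0 (ED) silicon a logical channel must receive a suspend request before it is started or resumed. As a compact reference, the pattern that the hunks add to d40_start() and d40_resume() boils down to the sketch below; d40_maybe_suspend_rev0() is an illustrative helper name, not a function in the driver, while d40_chan, D40_PHY_CHAN, d40_channel_execute_command() and D40_DMA_SUSPEND_REQ are taken from the patch itself.

/*
 * Illustrative sketch only, not part of the patch: the rev 0 workaround
 * that the hunks below open-code in d40_start() and d40_resume().
 */
static int d40_maybe_suspend_rev0(struct d40_chan *d40c)
{
	/* Rev 0 (ED) silicon: logical channels must be suspended first. */
	if (d40c->base->rev == 0 && d40c->log_num != D40_PHY_CHAN)
		return d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	return 0;
}

The on-chip memory relocation mentioned above (U8500_DMA_LCPA_BASE_ED) is handled under arch/arm and is therefore outside this diffstat.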
Diffstat (limited to 'drivers/dma')
 drivers/dma/ste_dma40.c | 32 ++++++++++++++++++++++++++++----
 1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1d21fbd419d1..21a759731ef1 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -218,6 +218,7 @@ struct d40_chan {
  * the same physical register.
  * @dev: The device structure.
  * @virtbase: The virtual base address of the DMA's register.
+ * @rev: silicon revision detected.
  * @clk: Pointer to the DMA clock structure.
  * @phy_start: Physical memory start of the DMA registers.
  * @phy_size: Size of the DMA register map.
@@ -250,6 +251,7 @@ struct d40_base {
 	spinlock_t		execmd_lock;
 	struct device		*dev;
 	void __iomem		*virtbase;
+	u8			rev:4;
 	struct clk		*clk;
 	phys_addr_t		phy_start;
 	resource_size_t		phy_size;
@@ -757,6 +759,17 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static int d40_start(struct d40_chan *d40c)
 {
+	if (d40c->base->rev == 0) {
+		int err;
+
+		if (d40c->log_num != D40_PHY_CHAN) {
+			err = d40_channel_execute_command(d40c,
+							  D40_DMA_SUSPEND_REQ);
+			if (err)
+				return err;
+		}
+	}
+
 	if (d40c->log_num != D40_PHY_CHAN)
 		d40_config_set_event(d40c, true);
 
@@ -1426,6 +1439,13 @@ static int d40_resume(struct dma_chan *chan)
 
 	spin_lock_irqsave(&d40c->lock, flags);
 
+	if (d40c->base->rev == 0)
+		if (d40c->log_num != D40_PHY_CHAN) {
+			res = d40_channel_execute_command(d40c,
+							  D40_DMA_SUSPEND_REQ);
+			goto no_suspend;
+		}
+
 	/* If bytes left to transfer or linked tx resume job */
 	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
 		if (d40c->log_num != D40_PHY_CHAN)
@@ -1433,6 +1453,7 @@ static int d40_resume(struct dma_chan *chan)
 		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
 	}
 
+no_suspend:
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
@@ -2286,6 +2307,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	int num_log_chans = 0;
 	int num_phy_chans;
 	int i;
+	u32 val;
 
 	clk = clk_get(&pdev->dev, NULL);
 
@@ -2324,12 +2346,13 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		}
 	}
 
-	i = readl(virtbase + D40_DREG_PERIPHID2);
+	/* Get silicon revision */
+	val = readl(virtbase + D40_DREG_PERIPHID2);
 
-	if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
+	if ((val & 0xf) != D40_PERIPHID2_DESIGNER) {
 		dev_err(&pdev->dev,
 			"[%s] Unknown designer! Got %x wanted %x\n",
-			__func__, i & 0xf, D40_PERIPHID2_DESIGNER);
+			__func__, val & 0xf, D40_PERIPHID2_DESIGNER);
 		goto failure;
 	}
 
@@ -2337,7 +2360,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
 
 	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
-		 (i >> 4) & 0xf, res->start);
+		 (val >> 4) & 0xf, res->start);
 
 	plat_data = pdev->dev.platform_data;
 
@@ -2359,6 +2382,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		goto failure;
 	}
 
+	base->rev = (val >> 4) & 0xf;
 	base->clk = clk;
 	base->num_phy_chans = num_phy_chans;
 	base->num_log_chans = num_log_chans;
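For reference, the revision detection added in d40_hw_detect_init() reads D40_DREG_PERIPHID2 once and splits it into a designer ID and a revision nibble; a minimal sketch of that decode, assuming only what the hunks above show:

	/* Sketch of the PERIPHID2 decode performed in d40_hw_detect_init(). */
	u32 val = readl(virtbase + D40_DREG_PERIPHID2);
	u8 designer = val & 0xf;	/* bits [3:0]: peripheral designer ID */
	u8 rev = (val >> 4) & 0xf;	/* bits [7:4]: silicon revision, 0 = oldest (ED) */

The stored base->rev is then what d40_start() and d40_resume() test against 0 to decide whether the extra suspend request is needed.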