author     Guennadi Liakhovetski <g.liakhovetski@gmx.de>   2009-12-10 12:35:07 -0500
committer  Dan Williams <dan.j.williams@intel.com>         2009-12-11 01:32:09 -0500
commit     86d61b33e48f1da5a6b310d3de93187db62ab72a (patch)
tree       9d82b881df70b3832111c9f79da69fcb60aa079b
parent     cfe4f2751ef1a5390b56c5d263f90b6ff138ba31 (diff)

sh: stylistic improvements for the DMA driver

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--  drivers/dma/shdma.c | 34
-rw-r--r--  drivers/dma/shdma.h | 14
2 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index f5fae1258b54..8d9acd751ed6 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -80,17 +80,17 @@ static int sh_dmae_rst(int id)
         unsigned short dmaor;
 
         sh_dmae_ctl_stop(id);
-        dmaor = (dmaor_read_reg(id)|DMAOR_INIT);
+        dmaor = dmaor_read_reg(id) | DMAOR_INIT;
 
         dmaor_write_reg(id, dmaor);
-        if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) {
+        if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
                 pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
                 return -EINVAL;
         }
         return 0;
 }
 
-static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
+static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
 {
         u32 chcr = sh_dmae_readl(sh_chan, CHCR);
         if (chcr & CHCR_DE) {
@@ -110,15 +110,14 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
 {
         sh_dmae_writel(sh_chan, hw.sar, SAR);
         sh_dmae_writel(sh_chan, hw.dar, DAR);
-        sh_dmae_writel(sh_chan,
-                        (hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
+        sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
 {
         u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 
-        chcr |= (CHCR_DE|CHCR_IE);
+        chcr |= CHCR_DE | CHCR_IE;
         sh_dmae_writel(sh_chan, chcr, CHCR);
 }
 
@@ -132,7 +131,7 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
 
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-        int ret = dmae_is_idle(sh_chan);
+        int ret = dmae_is_busy(sh_chan);
         /* When DMA was working, can not set data to CHCR */
         if (ret)
                 return ret;
@@ -149,7 +148,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
         u32 addr;
         int shift = 0;
-        int ret = dmae_is_idle(sh_chan);
+        int ret = dmae_is_busy(sh_chan);
         if (ret)
                 return ret;
 
@@ -307,7 +306,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
                 new = sh_dmae_get_desc(sh_chan);
                 if (!new) {
                         dev_err(sh_chan->dev,
-                                        "No free memory for link descriptor\n");
+                                "No free memory for link descriptor\n");
                         goto err_get_desc;
                 }
 
@@ -388,7 +387,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
         struct sh_dmae_regs hw;
 
         /* DMA work check */
-        if (dmae_is_idle(sh_chan))
+        if (dmae_is_busy(sh_chan))
                 return;
 
         /* Find the first un-transfer desciptor */
@@ -497,8 +496,9 @@ static void dmae_do_tasklet(unsigned long data)
         struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
         struct sh_desc *desc, *_desc, *cur_desc = NULL;
         u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+
         list_for_each_entry_safe(desc, _desc,
                         &sh_chan->ld_queue, node) {
                 if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
                         cur_desc = desc;
                         break;
@@ -543,8 +543,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
         /* alloc channel */
         new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
         if (!new_sh_chan) {
-                dev_err(shdev->common.dev, "No free memory for allocating "
-                        "dma channels!\n");
+                dev_err(shdev->common.dev,
+                        "No free memory for allocating dma channels!\n");
                 return -ENOMEM;
         }
 
@@ -586,8 +586,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
586 "sh-dmae%d", new_sh_chan->id); 586 "sh-dmae%d", new_sh_chan->id);
587 587
588 /* set up channel irq */ 588 /* set up channel irq */
589 err = request_irq(irq, &sh_dmae_interrupt, 589 err = request_irq(irq, &sh_dmae_interrupt, irqflags,
590 irqflags, new_sh_chan->dev_id, new_sh_chan); 590 new_sh_chan->dev_id, new_sh_chan);
591 if (err) { 591 if (err) {
592 dev_err(shdev->common.dev, "DMA channel %d request_irq error " 592 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
593 "with return %d\n", id, err); 593 "with return %d\n", id, err);
@@ -691,8 +691,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
         }
 
         for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
-                err = request_irq(eirq[ecnt], sh_dmae_err,
-                                irqflags, "DMAC Address Error", shdev);
+                err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
+                                  "DMAC Address Error", shdev);
                 if (err) {
                         dev_err(&pdev->dev, "DMA device request_irq"
                                 "error (irq %d) with return %d\n",
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 2b4bc15a2c0a..60b81e529b42 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -35,15 +35,15 @@ struct sh_desc {
 
 struct sh_dmae_chan {
         dma_cookie_t completed_cookie;  /* The maximum cookie completed */
         spinlock_t desc_lock;           /* Descriptor operation lock */
         struct list_head ld_queue;      /* Link descriptors queue */
         struct list_head ld_free;       /* Link descriptors free */
         struct dma_chan common;         /* DMA common channel */
         struct device *dev;             /* Channel device */
         struct tasklet_struct tasklet;  /* Tasklet */
         int descs_allocated;            /* desc count */
         int id;                         /* Raw id of this channel */
         char dev_id[16];                /* unique name per DMAC of channel */
 
         /* Set chcr */
         int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
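
Note on the dmae_is_idle() -> dmae_is_busy() rename: the helper's body is cut off by the first hunk, but the callers shown here (dmae_set_chcr(), dmae_set_dmars(), sh_chan_xfer_ld_queue()) all treat a non-zero return as "channel busy, do not touch the registers". A minimal sketch of that contract follows, reusing the driver's own helpers from the diff; the -EBUSY value and the register write after the busy check are assumptions, since those lines are not visible in the hunks above.

/* Sketch only -- the full bodies are truncated in the hunks above. */
static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        if (chcr & CHCR_DE)     /* transfer enable bit still set: channel busy */
                return -EBUSY;  /* assumed errno; callers only test for non-zero */
        return 0;
}

/* Caller pattern, as in dmae_set_chcr() / dmae_set_dmars() above */
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
        int ret = dmae_is_busy(sh_chan);
        /* When DMA was working, can not set data to CHCR */
        if (ret)
                return ret;

        sh_dmae_writel(sh_chan, val, CHCR);     /* assumed write; not shown in the hunk */
        return 0;
}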