Diffstat (limited to 'drivers/dma/shdma.c')
-rw-r--r--  drivers/dma/shdma.c | 36
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 034ecf0ace03..2e4a54c8afeb 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -80,17 +80,17 @@ static int sh_dmae_rst(int id)
         unsigned short dmaor;
 
         sh_dmae_ctl_stop(id);
-        dmaor = (dmaor_read_reg(id)|DMAOR_INIT);
+        dmaor = dmaor_read_reg(id) | DMAOR_INIT;
 
         dmaor_write_reg(id, dmaor);
-        if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) {
+        if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
                 pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
                 return -EINVAL;
         }
         return 0;
 }
 
-static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
+static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
 {
         u32 chcr = sh_dmae_readl(sh_chan, CHCR);
         if (chcr & CHCR_DE) {
@@ -110,15 +110,14 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
 {
         sh_dmae_writel(sh_chan, hw.sar, SAR);
         sh_dmae_writel(sh_chan, hw.dar, DAR);
-        sh_dmae_writel(sh_chan,
-                (hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
+        sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
 {
         u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 
-        chcr |= (CHCR_DE|CHCR_IE);
+        chcr |= CHCR_DE | CHCR_IE;
         sh_dmae_writel(sh_chan, chcr, CHCR);
 }
 
@@ -132,7 +131,7 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
 
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-        int ret = dmae_is_idle(sh_chan);
+        int ret = dmae_is_busy(sh_chan);
         /* When DMA was working, can not set data to CHCR */
         if (ret)
                 return ret;
@@ -149,7 +148,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
         u32 addr;
         int shift = 0;
-        int ret = dmae_is_idle(sh_chan);
+        int ret = dmae_is_busy(sh_chan);
         if (ret)
                 return ret;
 
@@ -307,7 +306,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
                 new = sh_dmae_get_desc(sh_chan);
                 if (!new) {
                         dev_err(sh_chan->dev,
-                                "No free memory for link descriptor\n");
+                                        "No free memory for link descriptor\n");
                         goto err_get_desc;
                 }
 
@@ -388,7 +387,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
         struct sh_dmae_regs hw;
 
         /* DMA work check */
-        if (dmae_is_idle(sh_chan))
+        if (dmae_is_busy(sh_chan))
                 return;
 
         /* Find the first un-transfer desciptor */
@@ -497,8 +496,9 @@ static void dmae_do_tasklet(unsigned long data)
         struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
         struct sh_desc *desc, *_desc, *cur_desc = NULL;
         u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+
         list_for_each_entry_safe(desc, _desc,
-                                        &sh_chan->ld_queue, node) {
+                        &sh_chan->ld_queue, node) {
                 if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
                         cur_desc = desc;
                         break;
@@ -543,8 +543,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
         /* alloc channel */
         new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
         if (!new_sh_chan) {
-                dev_err(shdev->common.dev, "No free memory for allocating "
-                        "dma channels!\n");
+                dev_err(shdev->common.dev,
+                        "No free memory for allocating dma channels!\n");
                 return -ENOMEM;
         }
 
@@ -586,8 +586,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
                  "sh-dmae%d", new_sh_chan->id);
 
         /* set up channel irq */
-        err = request_irq(irq, &sh_dmae_interrupt,
-                        irqflags, new_sh_chan->dev_id, new_sh_chan);
+        err = request_irq(irq, &sh_dmae_interrupt, irqflags,
+                          new_sh_chan->dev_id, new_sh_chan);
         if (err) {
                 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
                         "with return %d\n", id, err);
@@ -676,6 +676,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
         shdev->common.device_is_tx_complete = sh_dmae_is_complete;
         shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
         shdev->common.dev = &pdev->dev;
+        /* Default transfer size of 32 bytes requires 32-byte alignment */
+        shdev->common.copy_align = 5;
 
 #if defined(CONFIG_CPU_SH4)
         /* Non Mix IRQ mode SH7722/SH7730 etc... */
@@ -688,8 +690,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
         }
 
         for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
-                err = request_irq(eirq[ecnt], sh_dmae_err,
-                        irqflags, "DMAC Address Error", shdev);
+                err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
+                                  "DMAC Address Error", shdev);
                 if (err) {
                         dev_err(&pdev->dev, "DMA device request_irq"
                                 "error (irq %d) with return %d\n",
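
Note on the one functional change in this commit: dmaengine encodes copy_align as a log2 value, so copy_align = 5 advertises that memcpy source, destination, and length should be multiples of 2^5 = 32 bytes, matching the 32-byte default transfer size mentioned in the new comment; generic clients use this to fall back to a CPU copy when a request does not meet the alignment. A minimal standalone sketch of that log2-alignment check (plain C outside the kernel; the helper name copy_is_aligned is invented for illustration, it is not a kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical helper: check that source offset, destination offset and
 * length all satisfy a log2-encoded alignment such as copy_align = 5. */
static bool copy_is_aligned(unsigned int copy_align, size_t src, size_t dst, size_t len)
{
        size_t mask = ((size_t)1 << copy_align) - 1;    /* copy_align = 5 -> mask 0x1f (32 bytes) */

        return ((src | dst | len) & mask) == 0;
}

int main(void)
{
        printf("%d\n", copy_is_aligned(5, 0x1000, 0x2000, 64)); /* 1: all multiples of 32 */
        printf("%d\n", copy_is_aligned(5, 0x1004, 0x2000, 64)); /* 0: source offset breaks alignment */
        return 0;
}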