author     Linus Torvalds <torvalds@linux-foundation.org>   2010-03-07 18:47:19 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-03-07 18:47:19 -0500
commit     4a31c08d2fecc74a630653828f5388fbb037f8c2 (patch)
tree       c3baf80157bab2cf6bdf3d26772001e43233aad6 /drivers/dma
parent     2ddb3b15f1b46836c61cfac5b00d8f08a24236e6 (diff)
parent     0272282f7cffb469cd2676dcb6e58bc942fcf8a8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (26 commits)
sh: Convert sh to use read/update_persistent_clock
sh: Move PMB debugfs entry initialization to later stage
sh: Fix up flush_cache_vmap() on SMP.
sh: fix up MMU reset with variable PMB mapping sizes.
sh: establish PMB mappings for NUMA nodes.
sh: check for existing mappings for bolted PMB entries.
sh: fixed virt/phys mapping helpers for PMB.
sh: make pmb iomapping configurable.
sh: reworked dynamic PMB mapping.
sh: Fix up cpumask_of_pcibus() for the NUMA build.
serial: sh-sci: Tidy up build warnings.
sh: Fix up ctrl_read/write stragglers in migor setup.
serial: sh-sci: Add DMA support.
dmaengine: shdma: extend .device_terminate_all() to record partial transfer
sh: merge sh7722 and sh7724 DMA register definitions
sh: activate runtime PM for dmaengine on sh7722 and sh7724
dmaengine: shdma: add runtime PM support.
dmaengine: shdma: separate DMA headers.
dmaengine: shdma: convert to platform device resources
dmaengine: shdma: fix DMA error handling.
...
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/shdma.c  500
-rw-r--r--  drivers/dma/shdma.h   26
2 files changed, 324 insertions, 202 deletions
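
Editor's note: the shdma.c changes below replace the old compile-time configuration (<cpu/dma.h>, <asm/dma-sh.h>, SH_DMARS_BASE*, DMTE*_IRQ and friends) with per-controller platform data plus platform resources, per the shortlog entries "convert to platform device resources" and "separate DMA headers". As a rough illustration of what an SoC setup file is expected to supply after this series, here is a minimal, hypothetical sketch; struct sh_dmae_pdata now comes from <asm/dmaengine.h>, only the fields actually referenced by the driver below are shown, and all addresses, IRQ numbers and bit positions are invented for the example.

static struct sh_dmae_channel dmac_channels[] = {
	/* per-channel register offset, DMARS offset and DMARS bit position */
	{ .offset = 0x00, .dmars = 0x00, .dmars_bit = 0 },
	{ .offset = 0x10, .dmars = 0x00, .dmars_bit = 8 },
	/* ... one entry per physical channel ... */
};

/* log2(transfer size in bytes) for each value of the CHCR TS field */
static unsigned int dmac_ts_shift[] = { 3, 0, 1, 2 };

static struct sh_dmae_pdata dmac_platform_data = {
	.channel	= dmac_channels,
	.channel_num	= ARRAY_SIZE(dmac_channels),
	.ts_shift	= dmac_ts_shift,
	.ts_shift_num	= ARRAY_SIZE(dmac_ts_shift),
	.ts_low_shift	= 3,			/* where the TS bits sit in CHCR (illustrative) */
	.ts_low_mask	= 0x18,			/* SoC specific, illustrative */
	.dmaor_init	= DMAOR_DME,		/* value written to DMAOR by sh_dmae_rst() */
	/* .slave / .slave_num list the DMARS slave IDs, omitted here */
};

static struct resource dmac_resources[] = {
	{	/* channel and DMAOR register block */
		.start = 0xfe008020, .end = 0xfe00808f, .flags = IORESOURCE_MEM,
	},
	{	/* DMARS registers: optional, only needed for slave DMA */
		.start = 0xfe009000, .end = 0xfe00900b, .flags = IORESOURCE_MEM,
	},
	{	/* error IRQ first, then per-channel IRQs (see the probe() comment below) */
		.start = 78, .end = 78, .flags = IORESOURCE_IRQ,
	},
	/* further IORESOURCE_IRQ entries, one per channel or as ranges */
};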
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index b75ce8b84c46..5d17e09cb625 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -24,8 +24,10 @@
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <cpu/dma.h> | 27 | #include <linux/pm_runtime.h> |
28 | #include <asm/dma-sh.h> | 28 | |
29 | #include <asm/dmaengine.h> | ||
30 | |||
29 | #include "shdma.h" | 31 | #include "shdma.h" |
30 | 32 | ||
31 | /* DMA descriptor control */ | 33 | /* DMA descriptor control */ |
@@ -38,30 +40,32 @@ enum sh_dmae_desc_status {
38 | }; | 40 | }; |
39 | 41 | ||
40 | #define NR_DESCS_PER_CHANNEL 32 | 42 | #define NR_DESCS_PER_CHANNEL 32 |
41 | /* | 43 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ |
42 | * Define the default configuration for dual address memory-memory transfer. | 44 | #define LOG2_DEFAULT_XFER_SIZE 2 |
43 | * The 0x400 value represents auto-request, external->external. | ||
44 | * | ||
45 | * And this driver set 4byte burst mode. | ||
46 | * If you want to change mode, you need to change RS_DEFAULT of value. | ||
47 | * (ex 1byte burst mode -> (RS_DUAL & ~TS_32) | ||
48 | */ | ||
49 | #define RS_DEFAULT (RS_DUAL) | ||
50 | 45 | ||
51 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ | 46 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ |
52 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; | 47 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; |
53 | 48 | ||
54 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); | 49 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); |
55 | 50 | ||
56 | #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) | ||
57 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 51 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
58 | { | 52 | { |
59 | ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg); | 53 | __raw_writel(data, sh_dc->base + reg / sizeof(u32)); |
60 | } | 54 | } |
61 | 55 | ||
62 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | 56 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) |
63 | { | 57 | { |
64 | return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg); | 58 | return __raw_readl(sh_dc->base + reg / sizeof(u32)); |
59 | } | ||
60 | |||
61 | static u16 dmaor_read(struct sh_dmae_device *shdev) | ||
62 | { | ||
63 | return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); | ||
64 | } | ||
65 | |||
66 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) | ||
67 | { | ||
68 | __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); | ||
65 | } | 69 | } |
66 | 70 | ||
67 | /* | 71 | /* |
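
Editor's note on the new register helpers above: sh_dc->base and shdev->chan_reg are declared as u32 __iomem * (and dmars as u16 __iomem *, see the shdma.h hunk at the end), so the byte offset passed in as reg has to be divided by the element size before the addition, because pointer arithmetic advances in element-sized steps. A minimal sketch of the equivalence, not taken from the patch:

u32 __iomem *base;	/* e.g. the ioremap() result held in shdev->chan_reg */
u32 reg = 0x0c;		/* some register's offset in bytes */

__raw_writel(0, base + reg / sizeof(u32));	/* what the driver does */
__raw_writel(0, (void __iomem *)base + reg);	/* the same MMIO address */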
@@ -69,24 +73,23 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
69 | * | 73 | * |
70 | * SH7780 has two DMAOR register | 74 | * SH7780 has two DMAOR register |
71 | */ | 75 | */ |
72 | static void sh_dmae_ctl_stop(int id) | 76 | static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) |
73 | { | 77 | { |
74 | unsigned short dmaor = dmaor_read_reg(id); | 78 | unsigned short dmaor = dmaor_read(shdev); |
75 | 79 | ||
76 | dmaor &= ~(DMAOR_NMIF | DMAOR_AE); | 80 | dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); |
77 | dmaor_write_reg(id, dmaor); | ||
78 | } | 81 | } |
79 | 82 | ||
80 | static int sh_dmae_rst(int id) | 83 | static int sh_dmae_rst(struct sh_dmae_device *shdev) |
81 | { | 84 | { |
82 | unsigned short dmaor; | 85 | unsigned short dmaor; |
83 | 86 | ||
84 | sh_dmae_ctl_stop(id); | 87 | sh_dmae_ctl_stop(shdev); |
85 | dmaor = dmaor_read_reg(id) | DMAOR_INIT; | 88 | dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init; |
86 | 89 | ||
87 | dmaor_write_reg(id, dmaor); | 90 | dmaor_write(shdev, dmaor); |
88 | if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) { | 91 | if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) { |
89 | pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); | 92 | pr_warning("dma-sh: Can't initialize DMAOR.\n"); |
90 | return -EINVAL; | 93 | return -EINVAL; |
91 | } | 94 | } |
92 | return 0; | 95 | return 0; |
@@ -102,13 +105,36 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
102 | return false; /* waiting */ | 105 | return false; /* waiting */ |
103 | } | 106 | } |
104 | 107 | ||
105 | static unsigned int ts_shift[] = TS_SHIFT; | 108 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) |
106 | static inline unsigned int calc_xmit_shift(u32 chcr) | ||
107 | { | 109 | { |
108 | int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | | 110 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, |
109 | ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); | 111 | struct sh_dmae_device, common); |
112 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
113 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | | ||
114 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); | ||
115 | |||
116 | if (cnt >= pdata->ts_shift_num) | ||
117 | cnt = 0; | ||
110 | 118 | ||
111 | return ts_shift[cnt]; | 119 | return pdata->ts_shift[cnt]; |
120 | } | ||
121 | |||
122 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) | ||
123 | { | ||
124 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, | ||
125 | struct sh_dmae_device, common); | ||
126 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
127 | int i; | ||
128 | |||
129 | for (i = 0; i < pdata->ts_shift_num; i++) | ||
130 | if (pdata->ts_shift[i] == l2size) | ||
131 | break; | ||
132 | |||
133 | if (i == pdata->ts_shift_num) | ||
134 | i = 0; | ||
135 | |||
136 | return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | | ||
137 | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); | ||
112 | } | 138 | } |
113 | 139 | ||
114 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) | 140 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) |
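
Editor's note: calc_xmit_shift() and log2size_to_chcr() above are inverses over the platform-supplied ts_shift[] table — the TS bit-field extracted from CHCR indexes the table, and the table entry is log2 of the transfer size in bytes. A short illustrative round trip (driver-internal sketch; it assumes a struct sh_dmae_chan *sh_chan whose ts_shift[] table contains the value 2):

u32 ts_bits = log2size_to_chcr(sh_chan, LOG2_DEFAULT_XFER_SIZE);	/* 2^2 = 4-byte units */
int shift = calc_xmit_shift(sh_chan, ts_bits);				/* back to 2 */
size_t bytes_per_unit = 1 << shift;					/* 4 */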
@@ -136,8 +162,13 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
136 | 162 | ||
137 | static void dmae_init(struct sh_dmae_chan *sh_chan) | 163 | static void dmae_init(struct sh_dmae_chan *sh_chan) |
138 | { | 164 | { |
139 | u32 chcr = RS_DEFAULT; /* default is DUAL mode */ | 165 | /* |
140 | sh_chan->xmit_shift = calc_xmit_shift(chcr); | 166 | * Default configuration for dual address memory-memory transfer. |
167 | * 0x400 represents auto-request. | ||
168 | */ | ||
169 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, | ||
170 | LOG2_DEFAULT_XFER_SIZE); | ||
171 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); | ||
141 | sh_dmae_writel(sh_chan, chcr, CHCR); | 172 | sh_dmae_writel(sh_chan, chcr, CHCR); |
142 | } | 173 | } |
143 | 174 | ||
@@ -147,37 +178,26 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
147 | if (dmae_is_busy(sh_chan)) | 178 | if (dmae_is_busy(sh_chan)) |
148 | return -EBUSY; | 179 | return -EBUSY; |
149 | 180 | ||
150 | sh_chan->xmit_shift = calc_xmit_shift(val); | 181 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); |
151 | sh_dmae_writel(sh_chan, val, CHCR); | 182 | sh_dmae_writel(sh_chan, val, CHCR); |
152 | 183 | ||
153 | return 0; | 184 | return 0; |
154 | } | 185 | } |
155 | 186 | ||
156 | #define DMARS_SHIFT 8 | ||
157 | #define DMARS_CHAN_MSK 0x01 | ||
158 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | 187 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) |
159 | { | 188 | { |
160 | u32 addr; | 189 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, |
161 | int shift = 0; | 190 | struct sh_dmae_device, common); |
191 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
192 | struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; | ||
193 | u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); | ||
194 | int shift = chan_pdata->dmars_bit; | ||
162 | 195 | ||
163 | if (dmae_is_busy(sh_chan)) | 196 | if (dmae_is_busy(sh_chan)) |
164 | return -EBUSY; | 197 | return -EBUSY; |
165 | 198 | ||
166 | if (sh_chan->id & DMARS_CHAN_MSK) | 199 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), |
167 | shift = DMARS_SHIFT; | 200 | addr); |
168 | |||
169 | if (sh_chan->id < 6) | ||
170 | /* DMA0RS0 - DMA0RS2 */ | ||
171 | addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4; | ||
172 | #ifdef SH_DMARS_BASE1 | ||
173 | else if (sh_chan->id < 12) | ||
174 | /* DMA1RS0 - DMA1RS2 */ | ||
175 | addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4; | ||
176 | #endif | ||
177 | else | ||
178 | return -EINVAL; | ||
179 | |||
180 | ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr); | ||
181 | 201 | ||
182 | return 0; | 202 | return 0; |
183 | } | 203 | } |
@@ -251,15 +271,15 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave(
251 | struct dma_device *dma_dev = sh_chan->common.device; | 271 | struct dma_device *dma_dev = sh_chan->common.device; |
252 | struct sh_dmae_device *shdev = container_of(dma_dev, | 272 | struct sh_dmae_device *shdev = container_of(dma_dev, |
253 | struct sh_dmae_device, common); | 273 | struct sh_dmae_device, common); |
254 | struct sh_dmae_pdata *pdata = &shdev->pdata; | 274 | struct sh_dmae_pdata *pdata = shdev->pdata; |
255 | int i; | 275 | int i; |
256 | 276 | ||
257 | if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) | 277 | if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) |
258 | return NULL; | 278 | return NULL; |
259 | 279 | ||
260 | for (i = 0; i < pdata->config_num; i++) | 280 | for (i = 0; i < pdata->slave_num; i++) |
261 | if (pdata->config[i].slave_id == slave_id) | 281 | if (pdata->slave[i].slave_id == slave_id) |
262 | return pdata->config + i; | 282 | return pdata->slave + i; |
263 | 283 | ||
264 | return NULL; | 284 | return NULL; |
265 | } | 285 | } |
@@ -270,6 +290,8 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
270 | struct sh_desc *desc; | 290 | struct sh_desc *desc; |
271 | struct sh_dmae_slave *param = chan->private; | 291 | struct sh_dmae_slave *param = chan->private; |
272 | 292 | ||
293 | pm_runtime_get_sync(sh_chan->dev); | ||
294 | |||
273 | /* | 295 | /* |
274 | * This relies on the guarantee from dmaengine that alloc_chan_resources | 296 | * This relies on the guarantee from dmaengine that alloc_chan_resources |
275 | * never runs concurrently with itself or free_chan_resources. | 297 | * never runs concurrently with itself or free_chan_resources. |
@@ -288,9 +310,8 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
288 | 310 | ||
289 | dmae_set_dmars(sh_chan, cfg->mid_rid); | 311 | dmae_set_dmars(sh_chan, cfg->mid_rid); |
290 | dmae_set_chcr(sh_chan, cfg->chcr); | 312 | dmae_set_chcr(sh_chan, cfg->chcr); |
291 | } else { | 313 | } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) { |
292 | if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400) | 314 | dmae_init(sh_chan); |
293 | dmae_set_chcr(sh_chan, RS_DEFAULT); | ||
294 | } | 315 | } |
295 | 316 | ||
296 | spin_lock_bh(&sh_chan->desc_lock); | 317 | spin_lock_bh(&sh_chan->desc_lock); |
@@ -312,6 +333,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
312 | } | 333 | } |
313 | spin_unlock_bh(&sh_chan->desc_lock); | 334 | spin_unlock_bh(&sh_chan->desc_lock); |
314 | 335 | ||
336 | if (!sh_chan->descs_allocated) | ||
337 | pm_runtime_put(sh_chan->dev); | ||
338 | |||
315 | return sh_chan->descs_allocated; | 339 | return sh_chan->descs_allocated; |
316 | } | 340 | } |
317 | 341 | ||
@@ -323,6 +347,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
323 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 347 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
324 | struct sh_desc *desc, *_desc; | 348 | struct sh_desc *desc, *_desc; |
325 | LIST_HEAD(list); | 349 | LIST_HEAD(list); |
350 | int descs = sh_chan->descs_allocated; | ||
326 | 351 | ||
327 | dmae_halt(sh_chan); | 352 | dmae_halt(sh_chan); |
328 | 353 | ||
@@ -343,6 +368,9 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
343 | 368 | ||
344 | spin_unlock_bh(&sh_chan->desc_lock); | 369 | spin_unlock_bh(&sh_chan->desc_lock); |
345 | 370 | ||
371 | if (descs > 0) | ||
372 | pm_runtime_put(sh_chan->dev); | ||
373 | |||
346 | list_for_each_entry_safe(desc, _desc, &list, node) | 374 | list_for_each_entry_safe(desc, _desc, &list, node) |
347 | kfree(desc); | 375 | kfree(desc); |
348 | } | 376 | } |
@@ -559,6 +587,19 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
559 | if (!chan) | 587 | if (!chan) |
560 | return; | 588 | return; |
561 | 589 | ||
590 | dmae_halt(sh_chan); | ||
591 | |||
592 | spin_lock_bh(&sh_chan->desc_lock); | ||
593 | if (!list_empty(&sh_chan->ld_queue)) { | ||
594 | /* Record partial transfer */ | ||
595 | struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, | ||
596 | struct sh_desc, node); | ||
597 | desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << | ||
598 | sh_chan->xmit_shift; | ||
599 | |||
600 | } | ||
601 | spin_unlock_bh(&sh_chan->desc_lock); | ||
602 | |||
562 | sh_dmae_chan_ld_cleanup(sh_chan, true); | 603 | sh_dmae_chan_ld_cleanup(sh_chan, true); |
563 | } | 604 | } |
564 | 605 | ||
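
Editor's note: this is the "record partial transfer" item from the shortlog — before the queue is torn down, the driver works out how many bytes of the head descriptor were actually moved (the programmed count in hw.tcr minus the remaining count read back from TCR, scaled by xmit_shift) and stores the result in desc->partial. A closely coupled client, such as the sh-sci serial driver added elsewhere in this merge, can read that value back after aborting a transfer, roughly as sketched below; this illustrates the intent only, is not code copied from that driver, and assumes the descriptor has not yet been recycled. Making struct sh_desc visible to such clients is one reason it moves from drivers/dma/shdma.h into <asm/dmaengine.h> (see the header hunk at the end).

/* tx is the struct dma_async_tx_descriptor * the client submitted earlier */
struct sh_desc *sh_desc = container_of(tx, struct sh_desc, async_tx);
size_t done;

chan->device->device_terminate_all(chan);
done = sh_desc->partial;	/* bytes actually transferred before the abort */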
@@ -661,7 +702,7 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
661 | 702 | ||
662 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | 703 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) |
663 | { | 704 | { |
664 | struct sh_desc *sd; | 705 | struct sh_desc *desc; |
665 | 706 | ||
666 | spin_lock_bh(&sh_chan->desc_lock); | 707 | spin_lock_bh(&sh_chan->desc_lock); |
667 | /* DMA work check */ | 708 | /* DMA work check */ |
@@ -671,10 +712,13 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
671 | } | 712 | } |
672 | 713 | ||
673 | /* Find the first not transferred desciptor */ | 714 | /* Find the first not transferred desciptor */ |
674 | list_for_each_entry(sd, &sh_chan->ld_queue, node) | 715 | list_for_each_entry(desc, &sh_chan->ld_queue, node) |
675 | if (sd->mark == DESC_SUBMITTED) { | 716 | if (desc->mark == DESC_SUBMITTED) { |
717 | dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", | ||
718 | desc->async_tx.cookie, sh_chan->id, | ||
719 | desc->hw.tcr, desc->hw.sar, desc->hw.dar); | ||
676 | /* Get the ld start address from ld_queue */ | 720 | /* Get the ld start address from ld_queue */ |
677 | dmae_set_reg(sh_chan, &sd->hw); | 721 | dmae_set_reg(sh_chan, &desc->hw); |
678 | dmae_start(sh_chan); | 722 | dmae_start(sh_chan); |
679 | break; | 723 | break; |
680 | } | 724 | } |
@@ -696,6 +740,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
696 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 740 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
697 | dma_cookie_t last_used; | 741 | dma_cookie_t last_used; |
698 | dma_cookie_t last_complete; | 742 | dma_cookie_t last_complete; |
743 | enum dma_status status; | ||
699 | 744 | ||
700 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 745 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
701 | 746 | ||
@@ -709,7 +754,27 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
709 | if (used) | 754 | if (used) |
710 | *used = last_used; | 755 | *used = last_used; |
711 | 756 | ||
712 | return dma_async_is_complete(cookie, last_complete, last_used); | 757 | spin_lock_bh(&sh_chan->desc_lock); |
758 | |||
759 | status = dma_async_is_complete(cookie, last_complete, last_used); | ||
760 | |||
761 | /* | ||
762 | * If we don't find cookie on the queue, it has been aborted and we have | ||
763 | * to report error | ||
764 | */ | ||
765 | if (status != DMA_SUCCESS) { | ||
766 | struct sh_desc *desc; | ||
767 | status = DMA_ERROR; | ||
768 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | ||
769 | if (desc->cookie == cookie) { | ||
770 | status = DMA_IN_PROGRESS; | ||
771 | break; | ||
772 | } | ||
773 | } | ||
774 | |||
775 | spin_unlock_bh(&sh_chan->desc_lock); | ||
776 | |||
777 | return status; | ||
713 | } | 778 | } |
714 | 779 | ||
715 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) | 780 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) |
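
Editor's note: after this hunk, device_is_tx_complete() no longer simply forwards dma_async_is_complete() — a cookie that is neither complete nor still sitting on ld_queue is reported as DMA_ERROR, which is how an abort (error interrupt or terminate_all) becomes visible to callers. A polling caller using the generic dmaengine wrapper of this era would see it roughly like this (illustrative sketch; handle_aborted_transfer() is a made-up placeholder):

dma_cookie_t done, used;
enum dma_status status;

do {
	status = dma_async_is_tx_complete(chan, cookie, &done, &used);
} while (status == DMA_IN_PROGRESS);

if (status == DMA_ERROR)
	handle_aborted_transfer();	/* descriptor was dropped by the driver */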
@@ -732,40 +797,32 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
732 | #if defined(CONFIG_CPU_SH4) | 797 | #if defined(CONFIG_CPU_SH4) |
733 | static irqreturn_t sh_dmae_err(int irq, void *data) | 798 | static irqreturn_t sh_dmae_err(int irq, void *data) |
734 | { | 799 | { |
735 | int err = 0; | ||
736 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; | 800 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; |
801 | int i; | ||
737 | 802 | ||
738 | /* IRQ Multi */ | 803 | /* halt the dma controller */ |
739 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 804 | sh_dmae_ctl_stop(shdev); |
740 | int __maybe_unused cnt = 0; | 805 | |
741 | switch (irq) { | 806 | /* We cannot detect, which channel caused the error, have to reset all */ |
742 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) | 807 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { |
743 | case DMTE6_IRQ: | 808 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
744 | cnt++; | 809 | if (sh_chan) { |
745 | #endif | 810 | struct sh_desc *desc; |
746 | case DMTE0_IRQ: | 811 | /* Stop the channel */ |
747 | if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) { | 812 | dmae_halt(sh_chan); |
748 | disable_irq(irq); | 813 | /* Complete all */ |
749 | return IRQ_HANDLED; | 814 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
815 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
816 | desc->mark = DESC_IDLE; | ||
817 | if (tx->callback) | ||
818 | tx->callback(tx->callback_param); | ||
750 | } | 819 | } |
751 | default: | 820 | list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free); |
752 | return IRQ_NONE; | ||
753 | } | 821 | } |
754 | } else { | ||
755 | /* reset dma controller */ | ||
756 | err = sh_dmae_rst(0); | ||
757 | if (err) | ||
758 | return err; | ||
759 | #ifdef SH_DMAC_BASE1 | ||
760 | if (shdev->pdata.mode & SHDMA_DMAOR1) { | ||
761 | err = sh_dmae_rst(1); | ||
762 | if (err) | ||
763 | return err; | ||
764 | } | ||
765 | #endif | ||
766 | disable_irq(irq); | ||
767 | return IRQ_HANDLED; | ||
768 | } | 822 | } |
823 | sh_dmae_rst(shdev); | ||
824 | |||
825 | return IRQ_HANDLED; | ||
769 | } | 826 | } |
770 | #endif | 827 | #endif |
771 | 828 | ||
@@ -796,19 +853,12 @@ static void dmae_do_tasklet(unsigned long data)
796 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 853 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
797 | } | 854 | } |
798 | 855 | ||
799 | static unsigned int get_dmae_irq(unsigned int id) | 856 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, |
800 | { | 857 | int irq, unsigned long flags) |
801 | unsigned int irq = 0; | ||
802 | if (id < ARRAY_SIZE(dmte_irq_map)) | ||
803 | irq = dmte_irq_map[id]; | ||
804 | return irq; | ||
805 | } | ||
806 | |||
807 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | ||
808 | { | 858 | { |
809 | int err; | 859 | int err; |
810 | unsigned int irq = get_dmae_irq(id); | 860 | struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; |
811 | unsigned long irqflags = IRQF_DISABLED; | 861 | struct platform_device *pdev = to_platform_device(shdev->common.dev); |
812 | struct sh_dmae_chan *new_sh_chan; | 862 | struct sh_dmae_chan *new_sh_chan; |
813 | 863 | ||
814 | /* alloc channel */ | 864 | /* alloc channel */ |
@@ -819,8 +869,13 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
819 | return -ENOMEM; | 869 | return -ENOMEM; |
820 | } | 870 | } |
821 | 871 | ||
872 | /* copy struct dma_device */ | ||
873 | new_sh_chan->common.device = &shdev->common; | ||
874 | |||
822 | new_sh_chan->dev = shdev->common.dev; | 875 | new_sh_chan->dev = shdev->common.dev; |
823 | new_sh_chan->id = id; | 876 | new_sh_chan->id = id; |
877 | new_sh_chan->irq = irq; | ||
878 | new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); | ||
824 | 879 | ||
825 | /* Init DMA tasklet */ | 880 | /* Init DMA tasklet */ |
826 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, | 881 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, |
@@ -835,29 +890,20 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
835 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); | 890 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); |
836 | INIT_LIST_HEAD(&new_sh_chan->ld_free); | 891 | INIT_LIST_HEAD(&new_sh_chan->ld_free); |
837 | 892 | ||
838 | /* copy struct dma_device */ | ||
839 | new_sh_chan->common.device = &shdev->common; | ||
840 | |||
841 | /* Add the channel to DMA device channel list */ | 893 | /* Add the channel to DMA device channel list */ |
842 | list_add_tail(&new_sh_chan->common.device_node, | 894 | list_add_tail(&new_sh_chan->common.device_node, |
843 | &shdev->common.channels); | 895 | &shdev->common.channels); |
844 | shdev->common.chancnt++; | 896 | shdev->common.chancnt++; |
845 | 897 | ||
846 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 898 | if (pdev->id >= 0) |
847 | irqflags = IRQF_SHARED; | 899 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), |
848 | #if defined(DMTE6_IRQ) | 900 | "sh-dmae%d.%d", pdev->id, new_sh_chan->id); |
849 | if (irq >= DMTE6_IRQ) | 901 | else |
850 | irq = DMTE6_IRQ; | 902 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), |
851 | else | 903 | "sh-dma%d", new_sh_chan->id); |
852 | #endif | ||
853 | irq = DMTE0_IRQ; | ||
854 | } | ||
855 | |||
856 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | ||
857 | "sh-dmae%d", new_sh_chan->id); | ||
858 | 904 | ||
859 | /* set up channel irq */ | 905 | /* set up channel irq */ |
860 | err = request_irq(irq, &sh_dmae_interrupt, irqflags, | 906 | err = request_irq(irq, &sh_dmae_interrupt, flags, |
861 | new_sh_chan->dev_id, new_sh_chan); | 907 | new_sh_chan->dev_id, new_sh_chan); |
862 | if (err) { | 908 | if (err) { |
863 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " | 909 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " |
@@ -881,12 +927,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
881 | 927 | ||
882 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { | 928 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { |
883 | if (shdev->chan[i]) { | 929 | if (shdev->chan[i]) { |
884 | struct sh_dmae_chan *shchan = shdev->chan[i]; | 930 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
885 | if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) | ||
886 | free_irq(dmte_irq_map[i], shchan); | ||
887 | 931 | ||
888 | list_del(&shchan->common.device_node); | 932 | free_irq(sh_chan->irq, sh_chan); |
889 | kfree(shchan); | 933 | |
934 | list_del(&sh_chan->common.device_node); | ||
935 | kfree(sh_chan); | ||
890 | shdev->chan[i] = NULL; | 936 | shdev->chan[i] = NULL; |
891 | } | 937 | } |
892 | } | 938 | } |
@@ -895,47 +941,84 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
895 | 941 | ||
896 | static int __init sh_dmae_probe(struct platform_device *pdev) | 942 | static int __init sh_dmae_probe(struct platform_device *pdev) |
897 | { | 943 | { |
898 | int err = 0, cnt, ecnt; | 944 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; |
899 | unsigned long irqflags = IRQF_DISABLED; | 945 | unsigned long irqflags = IRQF_DISABLED, |
900 | #if defined(CONFIG_CPU_SH4) | 946 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; |
901 | int eirq[] = { DMAE0_IRQ, | 947 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; |
902 | #if defined(DMAE1_IRQ) | 948 | int err, i, irq_cnt = 0, irqres = 0; |
903 | DMAE1_IRQ | ||
904 | #endif | ||
905 | }; | ||
906 | #endif | ||
907 | struct sh_dmae_device *shdev; | 949 | struct sh_dmae_device *shdev; |
950 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | ||
908 | 951 | ||
909 | /* get platform data */ | 952 | /* get platform data */ |
910 | if (!pdev->dev.platform_data) | 953 | if (!pdata || !pdata->channel_num) |
911 | return -ENODEV; | 954 | return -ENODEV; |
912 | 955 | ||
956 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
957 | /* DMARS area is optional, if absent, this controller cannot do slave DMA */ | ||
958 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
959 | /* | ||
960 | * IRQ resources: | ||
961 | * 1. there always must be at least one IRQ IO-resource. On SH4 it is | ||
962 | * the error IRQ, in which case it is the only IRQ in this resource: | ||
963 | * start == end. If it is the only IRQ resource, all channels also | ||
964 | * use the same IRQ. | ||
965 | * 2. DMA channel IRQ resources can be specified one per resource or in | ||
966 | * ranges (start != end) | ||
967 | * 3. iff all events (channels and, optionally, error) on this | ||
968 | * controller use the same IRQ, only one IRQ resource can be | ||
969 | * specified, otherwise there must be one IRQ per channel, even if | ||
970 | * some of them are equal | ||
971 | * 4. if all IRQs on this controller are equal or if some specific IRQs | ||
972 | * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be | ||
973 | * requested with the IRQF_SHARED flag | ||
974 | */ | ||
975 | errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
976 | if (!chan || !errirq_res) | ||
977 | return -ENODEV; | ||
978 | |||
979 | if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { | ||
980 | dev_err(&pdev->dev, "DMAC register region already claimed\n"); | ||
981 | return -EBUSY; | ||
982 | } | ||
983 | |||
984 | if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { | ||
985 | dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); | ||
986 | err = -EBUSY; | ||
987 | goto ermrdmars; | ||
988 | } | ||
989 | |||
990 | err = -ENOMEM; | ||
913 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); | 991 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); |
914 | if (!shdev) { | 992 | if (!shdev) { |
915 | dev_err(&pdev->dev, "No enough memory\n"); | 993 | dev_err(&pdev->dev, "Not enough memory\n"); |
916 | return -ENOMEM; | 994 | goto ealloc; |
995 | } | ||
996 | |||
997 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); | ||
998 | if (!shdev->chan_reg) | ||
999 | goto emapchan; | ||
1000 | if (dmars) { | ||
1001 | shdev->dmars = ioremap(dmars->start, resource_size(dmars)); | ||
1002 | if (!shdev->dmars) | ||
1003 | goto emapdmars; | ||
917 | } | 1004 | } |
918 | 1005 | ||
919 | /* platform data */ | 1006 | /* platform data */ |
920 | memcpy(&shdev->pdata, pdev->dev.platform_data, | 1007 | shdev->pdata = pdata; |
921 | sizeof(struct sh_dmae_pdata)); | 1008 | |
1009 | pm_runtime_enable(&pdev->dev); | ||
1010 | pm_runtime_get_sync(&pdev->dev); | ||
922 | 1011 | ||
923 | /* reset dma controller */ | 1012 | /* reset dma controller */ |
924 | err = sh_dmae_rst(0); | 1013 | err = sh_dmae_rst(shdev); |
925 | if (err) | 1014 | if (err) |
926 | goto rst_err; | 1015 | goto rst_err; |
927 | 1016 | ||
928 | /* SH7780/85/23 has DMAOR1 */ | ||
929 | if (shdev->pdata.mode & SHDMA_DMAOR1) { | ||
930 | err = sh_dmae_rst(1); | ||
931 | if (err) | ||
932 | goto rst_err; | ||
933 | } | ||
934 | |||
935 | INIT_LIST_HEAD(&shdev->common.channels); | 1017 | INIT_LIST_HEAD(&shdev->common.channels); |
936 | 1018 | ||
937 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | 1019 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); |
938 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | 1020 | if (dmars) |
1021 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | ||
939 | 1022 | ||
940 | shdev->common.device_alloc_chan_resources | 1023 | shdev->common.device_alloc_chan_resources |
941 | = sh_dmae_alloc_chan_resources; | 1024 | = sh_dmae_alloc_chan_resources; |
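
Editor's note: the probe() rework above encodes the IRQ conventions spelled out in its long comment — resource 0 is the (error) IRQ, channel IRQs either reuse it or follow as separate resources, possibly as ranges, and IORESOURCE_IRQ_SHAREABLE selects IRQF_SHARED. Expressed as platform resources, the two common layouts look roughly like this (IRQ numbers are invented for the example):

/* (a) fully multiplexed: one IRQ serves the error interrupt and every channel */
{ .start = 78, .end = 78, .flags = IORESOURCE_IRQ },

/* (b) dedicated error IRQ plus per-channel IRQs, given here partly as ranges;
 * the shareable range is requested with IRQF_SHARED for each channel it covers
 */
{ .start = 254, .end = 254, .flags = IORESOURCE_IRQ },					/* error  */
{ .start =  46, .end =  51, .flags = IORESOURCE_IRQ },					/* ch 0-5 */
{ .start =  92, .end =  93, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE },	/* ch 6-7 */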
@@ -950,37 +1033,72 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
950 | 1033 | ||
951 | shdev->common.dev = &pdev->dev; | 1034 | shdev->common.dev = &pdev->dev; |
952 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | 1035 | /* Default transfer size of 32 bytes requires 32-byte alignment */ |
953 | shdev->common.copy_align = 5; | 1036 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; |
954 | 1037 | ||
955 | #if defined(CONFIG_CPU_SH4) | 1038 | #if defined(CONFIG_CPU_SH4) |
956 | /* Non Mix IRQ mode SH7722/SH7730 etc... */ | 1039 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); |
957 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 1040 | |
1041 | if (!chanirq_res) | ||
1042 | chanirq_res = errirq_res; | ||
1043 | else | ||
1044 | irqres++; | ||
1045 | |||
1046 | if (chanirq_res == errirq_res || | ||
1047 | (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) | ||
958 | irqflags = IRQF_SHARED; | 1048 | irqflags = IRQF_SHARED; |
959 | eirq[0] = DMTE0_IRQ; | 1049 | |
960 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) | 1050 | errirq = errirq_res->start; |
961 | eirq[1] = DMTE6_IRQ; | 1051 | |
962 | #endif | 1052 | err = request_irq(errirq, sh_dmae_err, irqflags, |
1053 | "DMAC Address Error", shdev); | ||
1054 | if (err) { | ||
1055 | dev_err(&pdev->dev, | ||
1056 | "DMA failed requesting irq #%d, error %d\n", | ||
1057 | errirq, err); | ||
1058 | goto eirq_err; | ||
963 | } | 1059 | } |
964 | 1060 | ||
965 | for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) { | 1061 | #else |
966 | err = request_irq(eirq[ecnt], sh_dmae_err, irqflags, | 1062 | chanirq_res = errirq_res; |
967 | "DMAC Address Error", shdev); | 1063 | #endif /* CONFIG_CPU_SH4 */ |
968 | if (err) { | 1064 | |
969 | dev_err(&pdev->dev, "DMA device request_irq" | 1065 | if (chanirq_res->start == chanirq_res->end && |
970 | "error (irq %d) with return %d\n", | 1066 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { |
971 | eirq[ecnt], err); | 1067 | /* Special case - all multiplexed */ |
972 | goto eirq_err; | 1068 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { |
1069 | chan_irq[irq_cnt] = chanirq_res->start; | ||
1070 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
973 | } | 1071 | } |
1072 | } else { | ||
1073 | do { | ||
1074 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { | ||
1075 | if ((errirq_res->flags & IORESOURCE_BITS) == | ||
1076 | IORESOURCE_IRQ_SHAREABLE) | ||
1077 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
1078 | else | ||
1079 | chan_flag[irq_cnt] = IRQF_DISABLED; | ||
1080 | dev_dbg(&pdev->dev, | ||
1081 | "Found IRQ %d for channel %d\n", | ||
1082 | i, irq_cnt); | ||
1083 | chan_irq[irq_cnt++] = i; | ||
1084 | } | ||
1085 | chanirq_res = platform_get_resource(pdev, | ||
1086 | IORESOURCE_IRQ, ++irqres); | ||
1087 | } while (irq_cnt < pdata->channel_num && chanirq_res); | ||
974 | } | 1088 | } |
975 | #endif /* CONFIG_CPU_SH4 */ | 1089 | |
1090 | if (irq_cnt < pdata->channel_num) | ||
1091 | goto eirqres; | ||
976 | 1092 | ||
977 | /* Create DMA Channel */ | 1093 | /* Create DMA Channel */ |
978 | for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) { | 1094 | for (i = 0; i < pdata->channel_num; i++) { |
979 | err = sh_dmae_chan_probe(shdev, cnt); | 1095 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); |
980 | if (err) | 1096 | if (err) |
981 | goto chan_probe_err; | 1097 | goto chan_probe_err; |
982 | } | 1098 | } |
983 | 1099 | ||
1100 | pm_runtime_put(&pdev->dev); | ||
1101 | |||
984 | platform_set_drvdata(pdev, shdev); | 1102 | platform_set_drvdata(pdev, shdev); |
985 | dma_async_device_register(&shdev->common); | 1103 | dma_async_device_register(&shdev->common); |
986 | 1104 | ||
@@ -988,13 +1106,24 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
988 | 1106 | ||
989 | chan_probe_err: | 1107 | chan_probe_err: |
990 | sh_dmae_chan_remove(shdev); | 1108 | sh_dmae_chan_remove(shdev); |
991 | 1109 | eirqres: | |
1110 | #if defined(CONFIG_CPU_SH4) | ||
1111 | free_irq(errirq, shdev); | ||
992 | eirq_err: | 1112 | eirq_err: |
993 | for (ecnt-- ; ecnt >= 0; ecnt--) | 1113 | #endif |
994 | free_irq(eirq[ecnt], shdev); | ||
995 | |||
996 | rst_err: | 1114 | rst_err: |
1115 | pm_runtime_put(&pdev->dev); | ||
1116 | if (dmars) | ||
1117 | iounmap(shdev->dmars); | ||
1118 | emapdmars: | ||
1119 | iounmap(shdev->chan_reg); | ||
1120 | emapchan: | ||
997 | kfree(shdev); | 1121 | kfree(shdev); |
1122 | ealloc: | ||
1123 | if (dmars) | ||
1124 | release_mem_region(dmars->start, resource_size(dmars)); | ||
1125 | ermrdmars: | ||
1126 | release_mem_region(chan->start, resource_size(chan)); | ||
998 | 1127 | ||
999 | return err; | 1128 | return err; |
1000 | } | 1129 | } |
@@ -1002,36 +1131,39 @@ rst_err:
1002 | static int __exit sh_dmae_remove(struct platform_device *pdev) | 1131 | static int __exit sh_dmae_remove(struct platform_device *pdev) |
1003 | { | 1132 | { |
1004 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 1133 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
1134 | struct resource *res; | ||
1135 | int errirq = platform_get_irq(pdev, 0); | ||
1005 | 1136 | ||
1006 | dma_async_device_unregister(&shdev->common); | 1137 | dma_async_device_unregister(&shdev->common); |
1007 | 1138 | ||
1008 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 1139 | if (errirq > 0) |
1009 | free_irq(DMTE0_IRQ, shdev); | 1140 | free_irq(errirq, shdev); |
1010 | #if defined(DMTE6_IRQ) | ||
1011 | free_irq(DMTE6_IRQ, shdev); | ||
1012 | #endif | ||
1013 | } | ||
1014 | 1141 | ||
1015 | /* channel data remove */ | 1142 | /* channel data remove */ |
1016 | sh_dmae_chan_remove(shdev); | 1143 | sh_dmae_chan_remove(shdev); |
1017 | 1144 | ||
1018 | if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) { | 1145 | pm_runtime_disable(&pdev->dev); |
1019 | free_irq(DMAE0_IRQ, shdev); | 1146 | |
1020 | #if defined(DMAE1_IRQ) | 1147 | if (shdev->dmars) |
1021 | free_irq(DMAE1_IRQ, shdev); | 1148 | iounmap(shdev->dmars); |
1022 | #endif | 1149 | iounmap(shdev->chan_reg); |
1023 | } | 1150 | |
1024 | kfree(shdev); | 1151 | kfree(shdev); |
1025 | 1152 | ||
1153 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1154 | if (res) | ||
1155 | release_mem_region(res->start, resource_size(res)); | ||
1156 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1157 | if (res) | ||
1158 | release_mem_region(res->start, resource_size(res)); | ||
1159 | |||
1026 | return 0; | 1160 | return 0; |
1027 | } | 1161 | } |
1028 | 1162 | ||
1029 | static void sh_dmae_shutdown(struct platform_device *pdev) | 1163 | static void sh_dmae_shutdown(struct platform_device *pdev) |
1030 | { | 1164 | { |
1031 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 1165 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
1032 | sh_dmae_ctl_stop(0); | 1166 | sh_dmae_ctl_stop(shdev); |
1033 | if (shdev->pdata.mode & SHDMA_DMAOR1) | ||
1034 | sh_dmae_ctl_stop(1); | ||
1035 | } | 1167 | } |
1036 | 1168 | ||
1037 | static struct platform_driver sh_dmae_driver = { | 1169 | static struct platform_driver sh_dmae_driver = { |
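
Editor's note on the shdma.c changes as a whole: the runtime PM calls introduced here are strictly paired. probe() does pm_runtime_enable() plus pm_runtime_get_sync() around the controller reset and drops that reference once the channels are registered, the controller is then held only while a channel has descriptors allocated, and remove() ends with pm_runtime_disable(). The lifetime, condensed into a sketch rather than literal driver code:

pm_runtime_enable(dev);		/* probe() */
pm_runtime_get_sync(dev);	/* ... sh_dmae_rst() and channel setup ... */
pm_runtime_put(dev);		/* controller may power down until first use */

pm_runtime_get_sync(dev);	/* alloc_chan_resources(): descriptors allocated */
pm_runtime_put(dev);		/* free_chan_resources(): last descriptor freed */

pm_runtime_disable(dev);	/* remove() */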
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 7e227f3c87c4..153609a1e96c 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,23 +17,9 @@
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
19 | 19 | ||
20 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ | 20 | #include <asm/dmaengine.h> |
21 | |||
22 | struct sh_dmae_regs { | ||
23 | u32 sar; /* SAR / source address */ | ||
24 | u32 dar; /* DAR / destination address */ | ||
25 | u32 tcr; /* TCR / transfer count */ | ||
26 | }; | ||
27 | 21 | ||
28 | struct sh_desc { | 22 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ |
29 | struct sh_dmae_regs hw; | ||
30 | struct list_head node; | ||
31 | struct dma_async_tx_descriptor async_tx; | ||
32 | enum dma_data_direction direction; | ||
33 | dma_cookie_t cookie; | ||
34 | int chunks; | ||
35 | int mark; | ||
36 | }; | ||
37 | 23 | ||
38 | struct device; | 24 | struct device; |
39 | 25 | ||
@@ -47,14 +33,18 @@ struct sh_dmae_chan {
47 | struct tasklet_struct tasklet; /* Tasklet */ | 33 | struct tasklet_struct tasklet; /* Tasklet */ |
48 | int descs_allocated; /* desc count */ | 34 | int descs_allocated; /* desc count */ |
49 | int xmit_shift; /* log_2(bytes_per_xfer) */ | 35 | int xmit_shift; /* log_2(bytes_per_xfer) */ |
36 | int irq; | ||
50 | int id; /* Raw id of this channel */ | 37 | int id; /* Raw id of this channel */ |
38 | u32 __iomem *base; | ||
51 | char dev_id[16]; /* unique name per DMAC of channel */ | 39 | char dev_id[16]; /* unique name per DMAC of channel */ |
52 | }; | 40 | }; |
53 | 41 | ||
54 | struct sh_dmae_device { | 42 | struct sh_dmae_device { |
55 | struct dma_device common; | 43 | struct dma_device common; |
56 | struct sh_dmae_chan *chan[MAX_DMA_CHANNELS]; | 44 | struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; |
57 | struct sh_dmae_pdata pdata; | 45 | struct sh_dmae_pdata *pdata; |
46 | u32 __iomem *chan_reg; | ||
47 | u16 __iomem *dmars; | ||
58 | }; | 48 | }; |
59 | 49 | ||
60 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) | 50 | #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) |
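
Editor's note: the header changes above show how slave channels are identified after this series — chan->private points at a struct sh_dmae_slave whose slave_id (an enum sh_dmae_slave_chan_id) is matched against pdata->slave[] in sh_dmae_find_slave(). A client such as the sh-sci driver from the shortlog requests a channel roughly as follows; the filter function and helper are an illustrative sketch, not a copy of that driver, and a real filter would normally also check that the channel belongs to the expected controller:

static bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	/* hand the slave descriptor to shdma via chan->private */
	chan->private = arg;
	return true;
}

static struct dma_chan *request_slave_chan(struct sh_dmae_slave *param,
					   enum sh_dmae_slave_chan_id id)
{
	dma_cap_mask_t mask;

	param->slave_id = id;		/* looked up in pdata->slave[] */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, shdma_chan_filter, param);
}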