Diffstat (limited to 'drivers/dma/shdma.c')
-rw-r--r-- | drivers/dma/shdma.c | 1097 |
1 file changed, 760 insertions, 337 deletions
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 034ecf0ace03..6f25a20de99f 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -19,47 +19,54 @@ | |||
19 | 19 | ||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/slab.h> | ||
22 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
23 | #include <linux/dmaengine.h> | 24 | #include <linux/dmaengine.h> |
24 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
25 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
26 | #include <linux/dmapool.h> | ||
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <cpu/dma.h> | 28 | #include <linux/pm_runtime.h> |
29 | #include <asm/dma-sh.h> | 29 | |
30 | #include <asm/dmaengine.h> | ||
31 | |||
30 | #include "shdma.h" | 32 | #include "shdma.h" |
31 | 33 | ||
32 | /* DMA descriptor control */ | 34 | /* DMA descriptor control */ |
33 | #define DESC_LAST (-1) | 35 | enum sh_dmae_desc_status { |
34 | #define DESC_COMP (1) | 36 | DESC_IDLE, |
35 | #define DESC_NCOMP (0) | 37 | DESC_PREPARED, |
38 | DESC_SUBMITTED, | ||
39 | DESC_COMPLETED, /* completed, have to call callback */ | ||
40 | DESC_WAITING, /* callback called, waiting for ack / re-submit */ | ||
41 | }; | ||
36 | 42 | ||
37 | #define NR_DESCS_PER_CHANNEL 32 | 43 | #define NR_DESCS_PER_CHANNEL 32 |
38 | /* | 44 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ |
39 | * Define the default configuration for dual address memory-memory transfer. | 45 | #define LOG2_DEFAULT_XFER_SIZE 2 |
40 | * The 0x400 value represents auto-request, external->external. | 46 | |
41 | * | 47 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ |
42 | * And this driver set 4byte burst mode. | 48 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; |
43 | * If you want to change mode, you need to change RS_DEFAULT of value. | 49 | |
44 | * (ex 1byte burst mode -> (RS_DUAL & ~TS_32) | 50 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); |
45 | */ | ||
46 | #define RS_DEFAULT (RS_DUAL) | ||
47 | 51 | ||
48 | #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) | ||
49 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 52 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
50 | { | 53 | { |
51 | ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); | 54 | __raw_writel(data, sh_dc->base + reg / sizeof(u32)); |
52 | } | 55 | } |
53 | 56 | ||
54 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | 57 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) |
55 | { | 58 | { |
56 | return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); | 59 | return __raw_readl(sh_dc->base + reg / sizeof(u32)); |
57 | } | 60 | } |
58 | 61 | ||
59 | static void dmae_init(struct sh_dmae_chan *sh_chan) | 62 | static u16 dmaor_read(struct sh_dmae_device *shdev) |
60 | { | 63 | { |
61 | u32 chcr = RS_DEFAULT; /* default is DUAL mode */ | 64 | return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); |
62 | sh_dmae_writel(sh_chan, chcr, CHCR); | 65 | } |
66 | |||
67 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) | ||
68 | { | ||
69 | __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); | ||
63 | } | 70 | } |
64 | 71 | ||
65 | /* | 72 | /* |
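The new register helpers in the hunk above replace ctrl_outl()/ctrl_inl() with __raw_writel()/__raw_readl() against a per-channel base pointer, and divide the byte offset by sizeof(u32) because the base behaves as a u32 pointer, so pointer arithmetic already scales by four. A minimal user-space sketch of that scaling; the register offsets and the backing array are stand-ins, not the real SH DMAC map:

#include <stdint.h>
#include <stdio.h>

/* Illustrative byte offsets only -- not the real SH DMAC register layout */
#define SAR  0x00
#define DAR  0x04
#define TCR  0x08
#define CHCR 0x0c

static uint32_t regs[16];          /* stands in for the ioremap()ed window */
static uint32_t *base = regs;      /* like sh_dc->base: a pointer to u32   */

static void chan_writel(uint32_t data, uint32_t reg)
{
	/* reg is a byte offset; base[] indexes in 4-byte units */
	base[reg / sizeof(uint32_t)] = data;
}

static uint32_t chan_readl(uint32_t reg)
{
	return base[reg / sizeof(uint32_t)];
}

int main(void)
{
	chan_writel(0x12345678, TCR);
	/* TCR (byte offset 8) lands in regs[2] */
	printf("regs[2] = 0x%08x, readback = 0x%08x\n",
	       regs[2], chan_readl(TCR));
	return 0;
}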
@@ -67,59 +74,83 @@ static void dmae_init(struct sh_dmae_chan *sh_chan) | |||
67 | * | 74 | * |
68 | * SH7780 has two DMAOR register | 75 | * SH7780 has two DMAOR register |
69 | */ | 76 | */ |
70 | static void sh_dmae_ctl_stop(int id) | 77 | static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) |
71 | { | 78 | { |
72 | unsigned short dmaor = dmaor_read_reg(id); | 79 | unsigned short dmaor = dmaor_read(shdev); |
73 | 80 | ||
74 | dmaor &= ~(DMAOR_NMIF | DMAOR_AE); | 81 | dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); |
75 | dmaor_write_reg(id, dmaor); | ||
76 | } | 82 | } |
77 | 83 | ||
78 | static int sh_dmae_rst(int id) | 84 | static int sh_dmae_rst(struct sh_dmae_device *shdev) |
79 | { | 85 | { |
80 | unsigned short dmaor; | 86 | unsigned short dmaor; |
81 | 87 | ||
82 | sh_dmae_ctl_stop(id); | 88 | sh_dmae_ctl_stop(shdev); |
83 | dmaor = (dmaor_read_reg(id)|DMAOR_INIT); | 89 | dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init; |
84 | 90 | ||
85 | dmaor_write_reg(id, dmaor); | 91 | dmaor_write(shdev, dmaor); |
86 | if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) { | 92 | if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) { |
87 | pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); | 93 | pr_warning("dma-sh: Can't initialize DMAOR.\n"); |
88 | return -EINVAL; | 94 | return -EINVAL; |
89 | } | 95 | } |
90 | return 0; | 96 | return 0; |
91 | } | 97 | } |
92 | 98 | ||
93 | static int dmae_is_idle(struct sh_dmae_chan *sh_chan) | 99 | static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) |
94 | { | 100 | { |
95 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 101 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); |
96 | if (chcr & CHCR_DE) { | 102 | |
97 | if (!(chcr & CHCR_TE)) | 103 | if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) |
98 | return -EBUSY; /* working */ | 104 | return true; /* working */ |
99 | } | 105 | |
100 | return 0; /* waiting */ | 106 | return false; /* waiting */ |
101 | } | 107 | } |
102 | 108 | ||
103 | static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan) | 109 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) |
104 | { | 110 | { |
105 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 111 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, |
106 | return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT]; | 112 | struct sh_dmae_device, common); |
113 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
114 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | | ||
115 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); | ||
116 | |||
117 | if (cnt >= pdata->ts_shift_num) | ||
118 | cnt = 0; | ||
119 | |||
120 | return pdata->ts_shift[cnt]; | ||
121 | } | ||
122 | |||
123 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) | ||
124 | { | ||
125 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, | ||
126 | struct sh_dmae_device, common); | ||
127 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
128 | int i; | ||
129 | |||
130 | for (i = 0; i < pdata->ts_shift_num; i++) | ||
131 | if (pdata->ts_shift[i] == l2size) | ||
132 | break; | ||
133 | |||
134 | if (i == pdata->ts_shift_num) | ||
135 | i = 0; | ||
136 | |||
137 | return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | | ||
138 | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); | ||
107 | } | 139 | } |
108 | 140 | ||
109 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw) | 141 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) |
110 | { | 142 | { |
111 | sh_dmae_writel(sh_chan, hw.sar, SAR); | 143 | sh_dmae_writel(sh_chan, hw->sar, SAR); |
112 | sh_dmae_writel(sh_chan, hw.dar, DAR); | 144 | sh_dmae_writel(sh_chan, hw->dar, DAR); |
113 | sh_dmae_writel(sh_chan, | 145 | sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); |
114 | (hw.tcr >> calc_xmit_shift(sh_chan)), TCR); | ||
115 | } | 146 | } |
116 | 147 | ||
117 | static void dmae_start(struct sh_dmae_chan *sh_chan) | 148 | static void dmae_start(struct sh_dmae_chan *sh_chan) |
118 | { | 149 | { |
119 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 150 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); |
120 | 151 | ||
121 | chcr |= (CHCR_DE|CHCR_IE); | 152 | chcr |= CHCR_DE | CHCR_IE; |
122 | sh_dmae_writel(sh_chan, chcr, CHCR); | 153 | sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); |
123 | } | 154 | } |
124 | 155 | ||
125 | static void dmae_halt(struct sh_dmae_chan *sh_chan) | 156 | static void dmae_halt(struct sh_dmae_chan *sh_chan) |
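calc_xmit_shift() and log2size_to_chcr() in the hunk above are inverse lookups through pdata->ts_shift[]: the TS bits of CHCR, split into a low and a high field, form an index into the table, and each table entry is log2 of the transfer size in bytes. A self-contained sketch of that round trip; the masks, shifts and table contents below are invented for illustration and do not correspond to any real SH SoC:

#include <stdint.h>
#include <stdio.h>

/* All values below are illustrative, not taken from any real SH part */
static const unsigned int ts_shift[] = { 0, 1, 2, 3, 4, 5 }; /* log2(bytes) */
#define TS_LOW_MASK   0x00000018u
#define TS_LOW_SHIFT  3
#define TS_HIGH_MASK  0x00300000u
#define TS_HIGH_SHIFT (20 - 2)	/* high field supplies index bits 2.. */

static unsigned int calc_xmit_shift(uint32_t chcr)
{
	unsigned int cnt = ((chcr & TS_LOW_MASK) >> TS_LOW_SHIFT) |
			   ((chcr & TS_HIGH_MASK) >> TS_HIGH_SHIFT);

	if (cnt >= sizeof(ts_shift) / sizeof(ts_shift[0]))
		cnt = 0;
	return ts_shift[cnt];
}

static uint32_t log2size_to_chcr(int l2size)
{
	unsigned int i, n = sizeof(ts_shift) / sizeof(ts_shift[0]);

	for (i = 0; i < n; i++)
		if (ts_shift[i] == (unsigned int)l2size)
			break;
	if (i == n)
		i = 0;
	return ((i << TS_LOW_SHIFT) & TS_LOW_MASK) |
	       ((i << TS_HIGH_SHIFT) & TS_HIGH_MASK);
}

int main(void)
{
	uint32_t chcr = log2size_to_chcr(2);	/* 2^2 = 4-byte transfers */

	printf("chcr TS bits = 0x%08x, xmit_shift = %u\n",
	       chcr, calc_xmit_shift(chcr));	/* prints shift 2 */
	return 0;
}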
@@ -130,63 +161,53 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan) | |||
130 | sh_dmae_writel(sh_chan, chcr, CHCR); | 161 | sh_dmae_writel(sh_chan, chcr, CHCR); |
131 | } | 162 | } |
132 | 163 | ||
164 | static void dmae_init(struct sh_dmae_chan *sh_chan) | ||
165 | { | ||
166 | /* | ||
167 | * Default configuration for dual address memory-memory transfer. | ||
168 | * 0x400 represents auto-request. | ||
169 | */ | ||
170 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, | ||
171 | LOG2_DEFAULT_XFER_SIZE); | ||
172 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); | ||
173 | sh_dmae_writel(sh_chan, chcr, CHCR); | ||
174 | } | ||
175 | |||
133 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | 176 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) |
134 | { | 177 | { |
135 | int ret = dmae_is_idle(sh_chan); | ||
136 | /* When DMA was working, can not set data to CHCR */ | 178 | /* When DMA was working, can not set data to CHCR */ |
137 | if (ret) | 179 | if (dmae_is_busy(sh_chan)) |
138 | return ret; | 180 | return -EBUSY; |
139 | 181 | ||
182 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); | ||
140 | sh_dmae_writel(sh_chan, val, CHCR); | 183 | sh_dmae_writel(sh_chan, val, CHCR); |
184 | |||
141 | return 0; | 185 | return 0; |
142 | } | 186 | } |
143 | 187 | ||
144 | #define DMARS1_ADDR 0x04 | ||
145 | #define DMARS2_ADDR 0x08 | ||
146 | #define DMARS_SHIFT 8 | ||
147 | #define DMARS_CHAN_MSK 0x01 | ||
148 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | 188 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) |
149 | { | 189 | { |
150 | u32 addr; | 190 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, |
151 | int shift = 0; | 191 | struct sh_dmae_device, common); |
152 | int ret = dmae_is_idle(sh_chan); | 192 | struct sh_dmae_pdata *pdata = shdev->pdata; |
153 | if (ret) | 193 | struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; |
154 | return ret; | 194 | u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); |
155 | 195 | int shift = chan_pdata->dmars_bit; | |
156 | if (sh_chan->id & DMARS_CHAN_MSK) | ||
157 | shift = DMARS_SHIFT; | ||
158 | |||
159 | switch (sh_chan->id) { | ||
160 | /* DMARS0 */ | ||
161 | case 0: | ||
162 | case 1: | ||
163 | addr = SH_DMARS_BASE; | ||
164 | break; | ||
165 | /* DMARS1 */ | ||
166 | case 2: | ||
167 | case 3: | ||
168 | addr = (SH_DMARS_BASE + DMARS1_ADDR); | ||
169 | break; | ||
170 | /* DMARS2 */ | ||
171 | case 4: | ||
172 | case 5: | ||
173 | addr = (SH_DMARS_BASE + DMARS2_ADDR); | ||
174 | break; | ||
175 | default: | ||
176 | return -EINVAL; | ||
177 | } | ||
178 | 196 | ||
179 | ctrl_outw((val << shift) | | 197 | if (dmae_is_busy(sh_chan)) |
180 | (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)), | 198 | return -EBUSY; |
181 | addr); | 199 | |
200 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), | ||
201 | addr); | ||
182 | 202 | ||
183 | return 0; | 203 | return 0; |
184 | } | 204 | } |
185 | 205 | ||
186 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | 206 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) |
187 | { | 207 | { |
188 | struct sh_desc *desc = tx_to_sh_desc(tx); | 208 | struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; |
189 | struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); | 209 | struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); |
210 | dma_async_tx_callback callback = tx->callback; | ||
190 | dma_cookie_t cookie; | 211 | dma_cookie_t cookie; |
191 | 212 | ||
192 | spin_lock_bh(&sh_chan->desc_lock); | 213 | spin_lock_bh(&sh_chan->desc_lock); |
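The rewritten dmae_set_dmars() above packs the MID/RID values of two channels into one 16-bit DMARS register, one byte each, selected by chan_pdata->dmars_bit (0 or 8); the mask 0xff00 >> shift preserves the other channel's byte during the read-modify-write. A small stand-alone sketch of that arithmetic, with arbitrary mid_rid values:

#include <stdint.h>
#include <stdio.h>

static uint16_t dmars;	/* stands in for one 16-bit DMARS register */

/*
 * shift is 0 for the even channel and 8 for the odd one; the mask
 * 0xff00 >> shift keeps the other channel's byte untouched.
 */
static void set_dmars(unsigned int shift, uint8_t mid_rid)
{
	dmars = (dmars & (0xff00 >> shift)) | ((uint16_t)mid_rid << shift);
}

int main(void)
{
	set_dmars(0, 0x21);	/* even channel */
	set_dmars(8, 0x43);	/* odd channel; even channel's byte survives */
	printf("DMARS = 0x%04x\n", dmars);	/* prints 0x4321 */
	return 0;
}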
@@ -196,51 +217,108 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | |||
196 | if (cookie < 0) | 217 | if (cookie < 0) |
197 | cookie = 1; | 218 | cookie = 1; |
198 | 219 | ||
199 | /* If desc only in the case of 1 */ | 220 | sh_chan->common.cookie = cookie; |
200 | if (desc->async_tx.cookie != -EBUSY) | 221 | tx->cookie = cookie; |
201 | desc->async_tx.cookie = cookie; | 222 | |
202 | sh_chan->common.cookie = desc->async_tx.cookie; | 223 | /* Mark all chunks of this descriptor as submitted, move to the queue */ |
224 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { | ||
225 | /* | ||
226 | * All chunks are on the global ld_free, so, we have to find | ||
227 | * the end of the chain ourselves | ||
228 | */ | ||
229 | if (chunk != desc && (chunk->mark == DESC_IDLE || | ||
230 | chunk->async_tx.cookie > 0 || | ||
231 | chunk->async_tx.cookie == -EBUSY || | ||
232 | &chunk->node == &sh_chan->ld_free)) | ||
233 | break; | ||
234 | chunk->mark = DESC_SUBMITTED; | ||
235 | /* Callback goes to the last chunk */ | ||
236 | chunk->async_tx.callback = NULL; | ||
237 | chunk->cookie = cookie; | ||
238 | list_move_tail(&chunk->node, &sh_chan->ld_queue); | ||
239 | last = chunk; | ||
240 | } | ||
241 | |||
242 | last->async_tx.callback = callback; | ||
243 | last->async_tx.callback_param = tx->callback_param; | ||
203 | 244 | ||
204 | list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev); | 245 | dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n", |
246 | tx->cookie, &last->async_tx, sh_chan->id, | ||
247 | desc->hw.sar, desc->hw.tcr, desc->hw.dar); | ||
205 | 248 | ||
206 | spin_unlock_bh(&sh_chan->desc_lock); | 249 | spin_unlock_bh(&sh_chan->desc_lock); |
207 | 250 | ||
208 | return cookie; | 251 | return cookie; |
209 | } | 252 | } |
210 | 253 | ||
254 | /* Called with desc_lock held */ | ||
211 | static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) | 255 | static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) |
212 | { | 256 | { |
213 | struct sh_desc *desc, *_desc, *ret = NULL; | 257 | struct sh_desc *desc; |
214 | 258 | ||
215 | spin_lock_bh(&sh_chan->desc_lock); | 259 | list_for_each_entry(desc, &sh_chan->ld_free, node) |
216 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) { | 260 | if (desc->mark != DESC_PREPARED) { |
217 | if (async_tx_test_ack(&desc->async_tx)) { | 261 | BUG_ON(desc->mark != DESC_IDLE); |
218 | list_del(&desc->node); | 262 | list_del(&desc->node); |
219 | ret = desc; | 263 | return desc; |
220 | break; | ||
221 | } | 264 | } |
222 | } | ||
223 | spin_unlock_bh(&sh_chan->desc_lock); | ||
224 | 265 | ||
225 | return ret; | 266 | return NULL; |
226 | } | 267 | } |
227 | 268 | ||
228 | static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc) | 269 | static struct sh_dmae_slave_config *sh_dmae_find_slave( |
270 | struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id) | ||
229 | { | 271 | { |
230 | if (desc) { | 272 | struct dma_device *dma_dev = sh_chan->common.device; |
231 | spin_lock_bh(&sh_chan->desc_lock); | 273 | struct sh_dmae_device *shdev = container_of(dma_dev, |
274 | struct sh_dmae_device, common); | ||
275 | struct sh_dmae_pdata *pdata = shdev->pdata; | ||
276 | int i; | ||
232 | 277 | ||
233 | list_splice_init(&desc->tx_list, &sh_chan->ld_free); | 278 | if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) |
234 | list_add(&desc->node, &sh_chan->ld_free); | 279 | return NULL; |
235 | 280 | ||
236 | spin_unlock_bh(&sh_chan->desc_lock); | 281 | for (i = 0; i < pdata->slave_num; i++) |
237 | } | 282 | if (pdata->slave[i].slave_id == slave_id) |
283 | return pdata->slave + i; | ||
284 | |||
285 | return NULL; | ||
238 | } | 286 | } |
239 | 287 | ||
240 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | 288 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) |
241 | { | 289 | { |
242 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 290 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
243 | struct sh_desc *desc; | 291 | struct sh_desc *desc; |
292 | struct sh_dmae_slave *param = chan->private; | ||
293 | int ret; | ||
294 | |||
295 | pm_runtime_get_sync(sh_chan->dev); | ||
296 | |||
297 | /* | ||
298 | * This relies on the guarantee from dmaengine that alloc_chan_resources | ||
299 | * never runs concurrently with itself or free_chan_resources. | ||
300 | */ | ||
301 | if (param) { | ||
302 | struct sh_dmae_slave_config *cfg; | ||
303 | |||
304 | cfg = sh_dmae_find_slave(sh_chan, param->slave_id); | ||
305 | if (!cfg) { | ||
306 | ret = -EINVAL; | ||
307 | goto efindslave; | ||
308 | } | ||
309 | |||
310 | if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) { | ||
311 | ret = -EBUSY; | ||
312 | goto etestused; | ||
313 | } | ||
314 | |||
315 | param->config = cfg; | ||
316 | |||
317 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
318 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
319 | } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) { | ||
320 | dmae_init(sh_chan); | ||
321 | } | ||
244 | 322 | ||
245 | spin_lock_bh(&sh_chan->desc_lock); | 323 | spin_lock_bh(&sh_chan->desc_lock); |
246 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { | 324 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { |
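sh_dmae_alloc_chan_resources() above now inspects chan->private for a struct sh_dmae_slave, validates its slave_id against the platform data and programs DMARS/CHCR from the matching config. A hypothetical client-side sketch of how a peripheral driver might hand that structure over through the standard dmaengine filter mechanism; the header location of struct sh_dmae_slave and the helper names here are assumptions, not part of this patch:

/* Hypothetical client code, not part of this driver */
#include <linux/dmaengine.h>
#include <asm/dmaengine.h>	/* assumed home of struct sh_dmae_slave */

static bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct sh_dmae_slave *param = arg;

	/* Accept the channel; slave_id is validated at allocation time */
	chan->private = param;
	return true;
}

static struct dma_chan *request_tx_channel(struct sh_dmae_slave *param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* param->slave_id must hold the peripheral's SHDMA_SLAVE_* id */
	return dma_request_channel(mask, shdma_chan_filter, param);
}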
@@ -253,16 +331,28 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | |||
253 | dma_async_tx_descriptor_init(&desc->async_tx, | 331 | dma_async_tx_descriptor_init(&desc->async_tx, |
254 | &sh_chan->common); | 332 | &sh_chan->common); |
255 | desc->async_tx.tx_submit = sh_dmae_tx_submit; | 333 | desc->async_tx.tx_submit = sh_dmae_tx_submit; |
256 | desc->async_tx.flags = DMA_CTRL_ACK; | 334 | desc->mark = DESC_IDLE; |
257 | INIT_LIST_HEAD(&desc->tx_list); | ||
258 | sh_dmae_put_desc(sh_chan, desc); | ||
259 | 335 | ||
260 | spin_lock_bh(&sh_chan->desc_lock); | 336 | spin_lock_bh(&sh_chan->desc_lock); |
337 | list_add(&desc->node, &sh_chan->ld_free); | ||
261 | sh_chan->descs_allocated++; | 338 | sh_chan->descs_allocated++; |
262 | } | 339 | } |
263 | spin_unlock_bh(&sh_chan->desc_lock); | 340 | spin_unlock_bh(&sh_chan->desc_lock); |
264 | 341 | ||
342 | if (!sh_chan->descs_allocated) { | ||
343 | ret = -ENOMEM; | ||
344 | goto edescalloc; | ||
345 | } | ||
346 | |||
265 | return sh_chan->descs_allocated; | 347 | return sh_chan->descs_allocated; |
348 | |||
349 | edescalloc: | ||
350 | if (param) | ||
351 | clear_bit(param->slave_id, sh_dmae_slave_used); | ||
352 | etestused: | ||
353 | efindslave: | ||
354 | pm_runtime_put(sh_chan->dev); | ||
355 | return ret; | ||
266 | } | 356 | } |
267 | 357 | ||
268 | /* | 358 | /* |
@@ -273,8 +363,20 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
273 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 363 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
274 | struct sh_desc *desc, *_desc; | 364 | struct sh_desc *desc, *_desc; |
275 | LIST_HEAD(list); | 365 | LIST_HEAD(list); |
366 | int descs = sh_chan->descs_allocated; | ||
367 | |||
368 | dmae_halt(sh_chan); | ||
369 | |||
370 | /* Prepared and not submitted descriptors can still be on the queue */ | ||
371 | if (!list_empty(&sh_chan->ld_queue)) | ||
372 | sh_dmae_chan_ld_cleanup(sh_chan, true); | ||
373 | |||
374 | if (chan->private) { | ||
375 | /* The caller is holding dma_list_mutex */ | ||
376 | struct sh_dmae_slave *param = chan->private; | ||
377 | clear_bit(param->slave_id, sh_dmae_slave_used); | ||
378 | } | ||
276 | 379 | ||
277 | BUG_ON(!list_empty(&sh_chan->ld_queue)); | ||
278 | spin_lock_bh(&sh_chan->desc_lock); | 380 | spin_lock_bh(&sh_chan->desc_lock); |
279 | 381 | ||
280 | list_splice_init(&sh_chan->ld_free, &list); | 382 | list_splice_init(&sh_chan->ld_free, &list); |
@@ -282,128 +384,362 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
282 | 384 | ||
283 | spin_unlock_bh(&sh_chan->desc_lock); | 385 | spin_unlock_bh(&sh_chan->desc_lock); |
284 | 386 | ||
387 | if (descs > 0) | ||
388 | pm_runtime_put(sh_chan->dev); | ||
389 | |||
285 | list_for_each_entry_safe(desc, _desc, &list, node) | 390 | list_for_each_entry_safe(desc, _desc, &list, node) |
286 | kfree(desc); | 391 | kfree(desc); |
287 | } | 392 | } |
288 | 393 | ||
289 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | 394 | /** |
290 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | 395 | * sh_dmae_add_desc - get, set up and return one transfer descriptor |
291 | size_t len, unsigned long flags) | 396 | * @sh_chan: DMA channel |
397 | * @flags: DMA transfer flags | ||
398 | * @dest: destination DMA address, incremented when direction equals | ||
399 | * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL | ||
400 | * @src: source DMA address, incremented when direction equals | ||
401 | * DMA_TO_DEVICE or DMA_BIDIRECTIONAL | ||
402 | * @len: DMA transfer length | ||
403 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY | ||
404 | * @direction: needed for slave DMA to decide which address to keep constant, | ||
405 | * equals DMA_BIDIRECTIONAL for MEMCPY | ||
406 | * Returns 0 or an error | ||
407 | * Locks: called with desc_lock held | ||
408 | */ | ||
409 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | ||
410 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, | ||
411 | struct sh_desc **first, enum dma_data_direction direction) | ||
292 | { | 412 | { |
293 | struct sh_dmae_chan *sh_chan; | 413 | struct sh_desc *new; |
294 | struct sh_desc *first = NULL, *prev = NULL, *new; | ||
295 | size_t copy_size; | 414 | size_t copy_size; |
296 | 415 | ||
297 | if (!chan) | 416 | if (!*len) |
298 | return NULL; | 417 | return NULL; |
299 | 418 | ||
300 | if (!len) | 419 | /* Allocate the link descriptor from the free list */ |
420 | new = sh_dmae_get_desc(sh_chan); | ||
421 | if (!new) { | ||
422 | dev_err(sh_chan->dev, "No free link descriptor available\n"); | ||
301 | return NULL; | 423 | return NULL; |
424 | } | ||
302 | 425 | ||
303 | sh_chan = to_sh_chan(chan); | 426 | copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); |
304 | 427 | ||
305 | do { | 428 | new->hw.sar = *src; |
306 | /* Allocate the link descriptor from DMA pool */ | 429 | new->hw.dar = *dest; |
307 | new = sh_dmae_get_desc(sh_chan); | 430 | new->hw.tcr = copy_size; |
308 | if (!new) { | 431 | |
309 | dev_err(sh_chan->dev, | 432 | if (!*first) { |
310 | "No free memory for link descriptor\n"); | 433 | /* First desc */ |
311 | goto err_get_desc; | 434 | new->async_tx.cookie = -EBUSY; |
312 | } | 435 | *first = new; |
436 | } else { | ||
437 | /* Other desc - invisible to the user */ | ||
438 | new->async_tx.cookie = -EINVAL; | ||
439 | } | ||
440 | |||
441 | dev_dbg(sh_chan->dev, | ||
442 | "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", | ||
443 | copy_size, *len, *src, *dest, &new->async_tx, | ||
444 | new->async_tx.cookie, sh_chan->xmit_shift); | ||
445 | |||
446 | new->mark = DESC_PREPARED; | ||
447 | new->async_tx.flags = flags; | ||
448 | new->direction = direction; | ||
449 | |||
450 | *len -= copy_size; | ||
451 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) | ||
452 | *src += copy_size; | ||
453 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) | ||
454 | *dest += copy_size; | ||
455 | |||
456 | return new; | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * sh_dmae_prep_sg - prepare transfer descriptors from an SG list | ||
461 | * | ||
462 | * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also | ||
463 | * converted to scatter-gather to guarantee consistent locking and a correct | ||
464 | * list manipulation. For slave DMA direction carries the usual meaning, and, | ||
465 | * logically, the SG list is RAM and the addr variable contains slave address, | ||
466 | * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL | ||
467 | * and the SG list contains only one element and points at the source buffer. | ||
468 | */ | ||
469 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, | ||
470 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | ||
471 | enum dma_data_direction direction, unsigned long flags) | ||
472 | { | ||
473 | struct scatterlist *sg; | ||
474 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; | ||
475 | LIST_HEAD(tx_list); | ||
476 | int chunks = 0; | ||
477 | int i; | ||
478 | |||
479 | if (!sg_len) | ||
480 | return NULL; | ||
481 | |||
482 | for_each_sg(sgl, sg, sg_len, i) | ||
483 | chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / | ||
484 | (SH_DMA_TCR_MAX + 1); | ||
485 | |||
486 | /* Have to lock the whole loop to protect against concurrent release */ | ||
487 | spin_lock_bh(&sh_chan->desc_lock); | ||
313 | 488 | ||
314 | copy_size = min(len, (size_t)SH_DMA_TCR_MAX); | 489 | /* |
490 | * Chaining: | ||
491 | * first descriptor is what user is dealing with in all API calls, its | ||
492 | * cookie is at first set to -EBUSY, at tx-submit to a positive | ||
493 | * number | ||
494 | * if more than one chunk is needed further chunks have cookie = -EINVAL | ||
495 | * the last chunk, if not equal to the first, has cookie = -ENOSPC | ||
496 | * all chunks are linked onto the tx_list head with their .node heads | ||
497 | * only during this function, then they are immediately spliced | ||
498 | * back onto the free list in form of a chain | ||
499 | */ | ||
500 | for_each_sg(sgl, sg, sg_len, i) { | ||
501 | dma_addr_t sg_addr = sg_dma_address(sg); | ||
502 | size_t len = sg_dma_len(sg); | ||
503 | |||
504 | if (!len) | ||
505 | goto err_get_desc; | ||
315 | 506 | ||
316 | new->hw.sar = dma_src; | 507 | do { |
317 | new->hw.dar = dma_dest; | 508 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", |
318 | new->hw.tcr = copy_size; | 509 | i, sg, len, (unsigned long long)sg_addr); |
319 | if (!first) | 510 | |
320 | first = new; | 511 | if (direction == DMA_FROM_DEVICE) |
512 | new = sh_dmae_add_desc(sh_chan, flags, | ||
513 | &sg_addr, addr, &len, &first, | ||
514 | direction); | ||
515 | else | ||
516 | new = sh_dmae_add_desc(sh_chan, flags, | ||
517 | addr, &sg_addr, &len, &first, | ||
518 | direction); | ||
519 | if (!new) | ||
520 | goto err_get_desc; | ||
521 | |||
522 | new->chunks = chunks--; | ||
523 | list_add_tail(&new->node, &tx_list); | ||
524 | } while (len); | ||
525 | } | ||
321 | 526 | ||
322 | new->mark = DESC_NCOMP; | 527 | if (new != first) |
323 | async_tx_ack(&new->async_tx); | 528 | new->async_tx.cookie = -ENOSPC; |
324 | 529 | ||
325 | prev = new; | 530 | /* Put them back on the free list, so, they don't get lost */ |
326 | len -= copy_size; | 531 | list_splice_tail(&tx_list, &sh_chan->ld_free); |
327 | dma_src += copy_size; | ||
328 | dma_dest += copy_size; | ||
329 | /* Insert the link descriptor to the LD ring */ | ||
330 | list_add_tail(&new->node, &first->tx_list); | ||
331 | } while (len); | ||
332 | 532 | ||
333 | new->async_tx.flags = flags; /* client is in control of this ack */ | 533 | spin_unlock_bh(&sh_chan->desc_lock); |
334 | new->async_tx.cookie = -EBUSY; /* Last desc */ | ||
335 | 534 | ||
336 | return &first->async_tx; | 535 | return &first->async_tx; |
337 | 536 | ||
338 | err_get_desc: | 537 | err_get_desc: |
339 | sh_dmae_put_desc(sh_chan, first); | 538 | list_for_each_entry(new, &tx_list, node) |
539 | new->mark = DESC_IDLE; | ||
540 | list_splice(&tx_list, &sh_chan->ld_free); | ||
541 | |||
542 | spin_unlock_bh(&sh_chan->desc_lock); | ||
543 | |||
340 | return NULL; | 544 | return NULL; |
545 | } | ||
546 | |||
547 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | ||
548 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
549 | size_t len, unsigned long flags) | ||
550 | { | ||
551 | struct sh_dmae_chan *sh_chan; | ||
552 | struct scatterlist sg; | ||
553 | |||
554 | if (!chan || !len) | ||
555 | return NULL; | ||
556 | |||
557 | chan->private = NULL; | ||
341 | 558 | ||
559 | sh_chan = to_sh_chan(chan); | ||
560 | |||
561 | sg_init_table(&sg, 1); | ||
562 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, | ||
563 | offset_in_page(dma_src)); | ||
564 | sg_dma_address(&sg) = dma_src; | ||
565 | sg_dma_len(&sg) = len; | ||
566 | |||
567 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, | ||
568 | flags); | ||
342 | } | 569 | } |
343 | 570 | ||
344 | /* | 571 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( |
345 | * sh_chan_ld_cleanup - Clean up link descriptors | 572 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, |
346 | * | 573 | enum dma_data_direction direction, unsigned long flags) |
347 | * This function clean up the ld_queue of DMA channel. | 574 | { |
348 | */ | 575 | struct sh_dmae_slave *param; |
349 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan) | 576 | struct sh_dmae_chan *sh_chan; |
577 | |||
578 | if (!chan) | ||
579 | return NULL; | ||
580 | |||
581 | sh_chan = to_sh_chan(chan); | ||
582 | param = chan->private; | ||
583 | |||
584 | /* Someone calling slave DMA on a public channel? */ | ||
585 | if (!param || !sg_len) { | ||
586 | dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n", | ||
587 | __func__, param, sg_len, param ? param->slave_id : -1); | ||
588 | return NULL; | ||
589 | } | ||
590 | |||
591 | /* | ||
592 | * if (param != NULL), this is a successfully requested slave channel, | ||
593 | * therefore param->config != NULL too. | ||
594 | */ | ||
595 | return sh_dmae_prep_sg(sh_chan, sgl, sg_len, ¶m->config->addr, | ||
596 | direction, flags); | ||
597 | } | ||
598 | |||
599 | static void sh_dmae_terminate_all(struct dma_chan *chan) | ||
600 | { | ||
601 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | ||
602 | |||
603 | if (!chan) | ||
604 | return; | ||
605 | |||
606 | dmae_halt(sh_chan); | ||
607 | |||
608 | spin_lock_bh(&sh_chan->desc_lock); | ||
609 | if (!list_empty(&sh_chan->ld_queue)) { | ||
610 | /* Record partial transfer */ | ||
611 | struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, | ||
612 | struct sh_desc, node); | ||
613 | desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << | ||
614 | sh_chan->xmit_shift; | ||
615 | |||
616 | } | ||
617 | spin_unlock_bh(&sh_chan->desc_lock); | ||
618 | |||
619 | sh_dmae_chan_ld_cleanup(sh_chan, true); | ||
620 | } | ||
621 | |||
622 | static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | ||
350 | { | 623 | { |
351 | struct sh_desc *desc, *_desc; | 624 | struct sh_desc *desc, *_desc; |
625 | /* Is the "exposed" head of a chain acked? */ | ||
626 | bool head_acked = false; | ||
627 | dma_cookie_t cookie = 0; | ||
628 | dma_async_tx_callback callback = NULL; | ||
629 | void *param = NULL; | ||
352 | 630 | ||
353 | spin_lock_bh(&sh_chan->desc_lock); | 631 | spin_lock_bh(&sh_chan->desc_lock); |
354 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { | 632 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { |
355 | dma_async_tx_callback callback; | 633 | struct dma_async_tx_descriptor *tx = &desc->async_tx; |
356 | void *callback_param; | 634 | |
357 | 635 | BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); | |
358 | /* non send data */ | 636 | BUG_ON(desc->mark != DESC_SUBMITTED && |
359 | if (desc->mark == DESC_NCOMP) | 637 | desc->mark != DESC_COMPLETED && |
638 | desc->mark != DESC_WAITING); | ||
639 | |||
640 | /* | ||
641 | * queue is ordered, and we use this loop to (1) clean up all | ||
642 | * completed descriptors, and to (2) update descriptor flags of | ||
643 | * any chunks in a (partially) completed chain | ||
644 | */ | ||
645 | if (!all && desc->mark == DESC_SUBMITTED && | ||
646 | desc->cookie != cookie) | ||
360 | break; | 647 | break; |
361 | 648 | ||
362 | /* send data sesc */ | 649 | if (tx->cookie > 0) |
363 | callback = desc->async_tx.callback; | 650 | cookie = tx->cookie; |
364 | callback_param = desc->async_tx.callback_param; | 651 | |
652 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { | ||
653 | if (sh_chan->completed_cookie != desc->cookie - 1) | ||
654 | dev_dbg(sh_chan->dev, | ||
655 | "Completing cookie %d, expected %d\n", | ||
656 | desc->cookie, | ||
657 | sh_chan->completed_cookie + 1); | ||
658 | sh_chan->completed_cookie = desc->cookie; | ||
659 | } | ||
660 | |||
661 | /* Call callback on the last chunk */ | ||
662 | if (desc->mark == DESC_COMPLETED && tx->callback) { | ||
663 | desc->mark = DESC_WAITING; | ||
664 | callback = tx->callback; | ||
665 | param = tx->callback_param; | ||
666 | dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n", | ||
667 | tx->cookie, tx, sh_chan->id); | ||
668 | BUG_ON(desc->chunks != 1); | ||
669 | break; | ||
670 | } | ||
365 | 671 | ||
366 | /* Remove from ld_queue list */ | 672 | if (tx->cookie > 0 || tx->cookie == -EBUSY) { |
367 | list_splice_init(&desc->tx_list, &sh_chan->ld_free); | 673 | if (desc->mark == DESC_COMPLETED) { |
674 | BUG_ON(tx->cookie < 0); | ||
675 | desc->mark = DESC_WAITING; | ||
676 | } | ||
677 | head_acked = async_tx_test_ack(tx); | ||
678 | } else { | ||
679 | switch (desc->mark) { | ||
680 | case DESC_COMPLETED: | ||
681 | desc->mark = DESC_WAITING; | ||
682 | /* Fall through */ | ||
683 | case DESC_WAITING: | ||
684 | if (head_acked) | ||
685 | async_tx_ack(&desc->async_tx); | ||
686 | } | ||
687 | } | ||
368 | 688 | ||
369 | dev_dbg(sh_chan->dev, "link descriptor %p will be recycle.\n", | 689 | dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n", |
370 | desc); | 690 | tx, tx->cookie); |
371 | 691 | ||
372 | list_move(&desc->node, &sh_chan->ld_free); | 692 | if (((desc->mark == DESC_COMPLETED || |
373 | /* Run the link descriptor callback function */ | 693 | desc->mark == DESC_WAITING) && |
374 | if (callback) { | 694 | async_tx_test_ack(&desc->async_tx)) || all) { |
375 | spin_unlock_bh(&sh_chan->desc_lock); | 695 | /* Remove from ld_queue list */ |
376 | dev_dbg(sh_chan->dev, "link descriptor %p callback\n", | 696 | desc->mark = DESC_IDLE; |
377 | desc); | 697 | list_move(&desc->node, &sh_chan->ld_free); |
378 | callback(callback_param); | ||
379 | spin_lock_bh(&sh_chan->desc_lock); | ||
380 | } | 698 | } |
381 | } | 699 | } |
382 | spin_unlock_bh(&sh_chan->desc_lock); | 700 | spin_unlock_bh(&sh_chan->desc_lock); |
701 | |||
702 | if (callback) | ||
703 | callback(param); | ||
704 | |||
705 | return callback; | ||
706 | } | ||
707 | |||
708 | /* | ||
709 | * sh_chan_ld_cleanup - Clean up link descriptors | ||
710 | * | ||
711 | * This function cleans up the ld_queue of DMA channel. | ||
712 | */ | ||
713 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | ||
714 | { | ||
715 | while (__ld_cleanup(sh_chan, all)) | ||
716 | ; | ||
383 | } | 717 | } |
384 | 718 | ||
385 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | 719 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) |
386 | { | 720 | { |
387 | struct list_head *ld_node; | 721 | struct sh_desc *desc; |
388 | struct sh_dmae_regs hw; | ||
389 | 722 | ||
723 | spin_lock_bh(&sh_chan->desc_lock); | ||
390 | /* DMA work check */ | 724 | /* DMA work check */ |
391 | if (dmae_is_idle(sh_chan)) | 725 | if (dmae_is_busy(sh_chan)) { |
726 | spin_unlock_bh(&sh_chan->desc_lock); | ||
392 | return; | 727 | return; |
393 | |||
394 | /* Find the first un-transfer desciptor */ | ||
395 | for (ld_node = sh_chan->ld_queue.next; | ||
396 | (ld_node != &sh_chan->ld_queue) | ||
397 | && (to_sh_desc(ld_node)->mark == DESC_COMP); | ||
398 | ld_node = ld_node->next) | ||
399 | cpu_relax(); | ||
400 | |||
401 | if (ld_node != &sh_chan->ld_queue) { | ||
402 | /* Get the ld start address from ld_queue */ | ||
403 | hw = to_sh_desc(ld_node)->hw; | ||
404 | dmae_set_reg(sh_chan, hw); | ||
405 | dmae_start(sh_chan); | ||
406 | } | 728 | } |
729 | |||
730 | /* Find the first not transferred desciptor */ | ||
731 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | ||
732 | if (desc->mark == DESC_SUBMITTED) { | ||
733 | dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", | ||
734 | desc->async_tx.cookie, sh_chan->id, | ||
735 | desc->hw.tcr, desc->hw.sar, desc->hw.dar); | ||
736 | /* Get the ld start address from ld_queue */ | ||
737 | dmae_set_reg(sh_chan, &desc->hw); | ||
738 | dmae_start(sh_chan); | ||
739 | break; | ||
740 | } | ||
741 | |||
742 | spin_unlock_bh(&sh_chan->desc_lock); | ||
407 | } | 743 | } |
408 | 744 | ||
409 | static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) | 745 | static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) |
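sh_dmae_prep_sg() above pre-counts chunks with a ceiling division by SH_DMA_TCR_MAX + 1, and sh_dmae_add_desc() caps each descriptor at that many bytes (the byte count is shifted by xmit_shift only when it is written to TCR). A runnable sketch of the splitting arithmetic; the TCR_MAX value below is illustrative, the real constant comes from shdma.h:

#include <stdio.h>
#include <stddef.h>

/* Illustrative limit only: each descriptor moves at most TCR_MAX + 1 bytes */
#define TCR_MAX 0x00ffffffUL

int main(void)
{
	size_t len = 40 * 1024 * 1024;		/* a 40 MiB request */
	size_t chunks = (len + TCR_MAX) / (TCR_MAX + 1); /* ceiling division */
	size_t done = 0, n = 0;

	while (len) {
		size_t copy = len < TCR_MAX + 1 ? len : TCR_MAX + 1;

		printf("chunk %zu: %zu bytes at offset %zu\n", ++n, copy, done);
		done += copy;
		len -= copy;
	}
	printf("expected %zu chunks, emitted %zu\n", chunks, n);
	return 0;
}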
@@ -420,13 +756,13 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, | |||
420 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 756 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
421 | dma_cookie_t last_used; | 757 | dma_cookie_t last_used; |
422 | dma_cookie_t last_complete; | 758 | dma_cookie_t last_complete; |
759 | enum dma_status status; | ||
423 | 760 | ||
424 | sh_dmae_chan_ld_cleanup(sh_chan); | 761 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
425 | 762 | ||
426 | last_used = chan->cookie; | 763 | last_used = chan->cookie; |
427 | last_complete = sh_chan->completed_cookie; | 764 | last_complete = sh_chan->completed_cookie; |
428 | if (last_complete == -EBUSY) | 765 | BUG_ON(last_complete < 0); |
429 | last_complete = last_used; | ||
430 | 766 | ||
431 | if (done) | 767 | if (done) |
432 | *done = last_complete; | 768 | *done = last_complete; |
@@ -434,7 +770,27 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, | |||
434 | if (used) | 770 | if (used) |
435 | *used = last_used; | 771 | *used = last_used; |
436 | 772 | ||
437 | return dma_async_is_complete(cookie, last_complete, last_used); | 773 | spin_lock_bh(&sh_chan->desc_lock); |
774 | |||
775 | status = dma_async_is_complete(cookie, last_complete, last_used); | ||
776 | |||
777 | /* | ||
778 | * If we don't find cookie on the queue, it has been aborted and we have | ||
779 | * to report error | ||
780 | */ | ||
781 | if (status != DMA_SUCCESS) { | ||
782 | struct sh_desc *desc; | ||
783 | status = DMA_ERROR; | ||
784 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | ||
785 | if (desc->cookie == cookie) { | ||
786 | status = DMA_IN_PROGRESS; | ||
787 | break; | ||
788 | } | ||
789 | } | ||
790 | |||
791 | spin_unlock_bh(&sh_chan->desc_lock); | ||
792 | |||
793 | return status; | ||
438 | } | 794 | } |
439 | 795 | ||
440 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) | 796 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) |
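sh_dmae_is_complete() above now reports DMA_ERROR when a cookie is neither completed nor still on ld_queue, i.e. when the transfer was aborted by sh_dmae_terminate_all(). A hypothetical client-side polling sketch showing how that status could be consumed; wait_for_cookie() is an invented helper, not an API of this driver:

/* Hypothetical client code, not part of this driver */
#include <linux/dmaengine.h>

static int wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;

	dma_async_issue_pending(chan);

	do {
		/* last/used are not needed here, this driver accepts NULL */
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (status == DMA_ERROR)
			return -EIO;	/* transfer was terminated */
		/* busy-wait; a real driver would sleep or use a callback */
		cpu_relax();
	} while (status != DMA_SUCCESS);

	return 0;
}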
@@ -457,99 +813,85 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data) | |||
457 | #if defined(CONFIG_CPU_SH4) | 813 | #if defined(CONFIG_CPU_SH4) |
458 | static irqreturn_t sh_dmae_err(int irq, void *data) | 814 | static irqreturn_t sh_dmae_err(int irq, void *data) |
459 | { | 815 | { |
460 | int err = 0; | ||
461 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; | 816 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; |
817 | int i; | ||
462 | 818 | ||
463 | /* IRQ Multi */ | 819 | /* halt the dma controller */ |
464 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 820 | sh_dmae_ctl_stop(shdev); |
465 | int cnt = 0; | 821 | |
466 | switch (irq) { | 822 | /* We cannot detect, which channel caused the error, have to reset all */ |
467 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) | 823 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { |
468 | case DMTE6_IRQ: | 824 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
469 | cnt++; | 825 | if (sh_chan) { |
470 | #endif | 826 | struct sh_desc *desc; |
471 | case DMTE0_IRQ: | 827 | /* Stop the channel */ |
472 | if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) { | 828 | dmae_halt(sh_chan); |
473 | disable_irq(irq); | 829 | /* Complete all */ |
474 | return IRQ_HANDLED; | 830 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
831 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
832 | desc->mark = DESC_IDLE; | ||
833 | if (tx->callback) | ||
834 | tx->callback(tx->callback_param); | ||
475 | } | 835 | } |
476 | default: | 836 | list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free); |
477 | return IRQ_NONE; | ||
478 | } | ||
479 | } else { | ||
480 | /* reset dma controller */ | ||
481 | err = sh_dmae_rst(0); | ||
482 | if (err) | ||
483 | return err; | ||
484 | if (shdev->pdata.mode & SHDMA_DMAOR1) { | ||
485 | err = sh_dmae_rst(1); | ||
486 | if (err) | ||
487 | return err; | ||
488 | } | 837 | } |
489 | disable_irq(irq); | ||
490 | return IRQ_HANDLED; | ||
491 | } | 838 | } |
839 | sh_dmae_rst(shdev); | ||
840 | |||
841 | return IRQ_HANDLED; | ||
492 | } | 842 | } |
493 | #endif | 843 | #endif |
494 | 844 | ||
495 | static void dmae_do_tasklet(unsigned long data) | 845 | static void dmae_do_tasklet(unsigned long data) |
496 | { | 846 | { |
497 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | 847 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; |
498 | struct sh_desc *desc, *_desc, *cur_desc = NULL; | 848 | struct sh_desc *desc; |
499 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | 849 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); |
500 | list_for_each_entry_safe(desc, _desc, | 850 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); |
501 | &sh_chan->ld_queue, node) { | 851 | |
502 | if ((desc->hw.sar + desc->hw.tcr) == sar_buf) { | 852 | spin_lock(&sh_chan->desc_lock); |
503 | cur_desc = desc; | 853 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
854 | if (desc->mark == DESC_SUBMITTED && | ||
855 | ((desc->direction == DMA_FROM_DEVICE && | ||
856 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || | ||
857 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { | ||
858 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", | ||
859 | desc->async_tx.cookie, &desc->async_tx, | ||
860 | desc->hw.dar); | ||
861 | desc->mark = DESC_COMPLETED; | ||
504 | break; | 862 | break; |
505 | } | 863 | } |
506 | } | 864 | } |
865 | spin_unlock(&sh_chan->desc_lock); | ||
507 | 866 | ||
508 | if (cur_desc) { | ||
509 | switch (cur_desc->async_tx.cookie) { | ||
510 | case 0: /* other desc data */ | ||
511 | break; | ||
512 | case -EBUSY: /* last desc */ | ||
513 | sh_chan->completed_cookie = | ||
514 | cur_desc->async_tx.cookie; | ||
515 | break; | ||
516 | default: /* first desc ( 0 < )*/ | ||
517 | sh_chan->completed_cookie = | ||
518 | cur_desc->async_tx.cookie - 1; | ||
519 | break; | ||
520 | } | ||
521 | cur_desc->mark = DESC_COMP; | ||
522 | } | ||
523 | /* Next desc */ | 867 | /* Next desc */ |
524 | sh_chan_xfer_ld_queue(sh_chan); | 868 | sh_chan_xfer_ld_queue(sh_chan); |
525 | sh_dmae_chan_ld_cleanup(sh_chan); | 869 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
526 | } | ||
527 | |||
528 | static unsigned int get_dmae_irq(unsigned int id) | ||
529 | { | ||
530 | unsigned int irq = 0; | ||
531 | if (id < ARRAY_SIZE(dmte_irq_map)) | ||
532 | irq = dmte_irq_map[id]; | ||
533 | return irq; | ||
534 | } | 870 | } |
535 | 871 | ||
536 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | 872 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, |
873 | int irq, unsigned long flags) | ||
537 | { | 874 | { |
538 | int err; | 875 | int err; |
539 | unsigned int irq = get_dmae_irq(id); | 876 | struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; |
540 | unsigned long irqflags = IRQF_DISABLED; | 877 | struct platform_device *pdev = to_platform_device(shdev->common.dev); |
541 | struct sh_dmae_chan *new_sh_chan; | 878 | struct sh_dmae_chan *new_sh_chan; |
542 | 879 | ||
543 | /* alloc channel */ | 880 | /* alloc channel */ |
544 | new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); | 881 | new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); |
545 | if (!new_sh_chan) { | 882 | if (!new_sh_chan) { |
546 | dev_err(shdev->common.dev, "No free memory for allocating " | 883 | dev_err(shdev->common.dev, |
547 | "dma channels!\n"); | 884 | "No free memory for allocating dma channels!\n"); |
548 | return -ENOMEM; | 885 | return -ENOMEM; |
549 | } | 886 | } |
550 | 887 | ||
888 | /* copy struct dma_device */ | ||
889 | new_sh_chan->common.device = &shdev->common; | ||
890 | |||
551 | new_sh_chan->dev = shdev->common.dev; | 891 | new_sh_chan->dev = shdev->common.dev; |
552 | new_sh_chan->id = id; | 892 | new_sh_chan->id = id; |
893 | new_sh_chan->irq = irq; | ||
894 | new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); | ||
553 | 895 | ||
554 | /* Init DMA tasklet */ | 896 | /* Init DMA tasklet */ |
555 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, | 897 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, |
@@ -564,41 +906,27 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) | |||
564 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); | 906 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); |
565 | INIT_LIST_HEAD(&new_sh_chan->ld_free); | 907 | INIT_LIST_HEAD(&new_sh_chan->ld_free); |
566 | 908 | ||
567 | /* copy struct dma_device */ | ||
568 | new_sh_chan->common.device = &shdev->common; | ||
569 | |||
570 | /* Add the channel to DMA device channel list */ | 909 | /* Add the channel to DMA device channel list */ |
571 | list_add_tail(&new_sh_chan->common.device_node, | 910 | list_add_tail(&new_sh_chan->common.device_node, |
572 | &shdev->common.channels); | 911 | &shdev->common.channels); |
573 | shdev->common.chancnt++; | 912 | shdev->common.chancnt++; |
574 | 913 | ||
575 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 914 | if (pdev->id >= 0) |
576 | irqflags = IRQF_SHARED; | 915 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), |
577 | #if defined(DMTE6_IRQ) | 916 | "sh-dmae%d.%d", pdev->id, new_sh_chan->id); |
578 | if (irq >= DMTE6_IRQ) | 917 | else |
579 | irq = DMTE6_IRQ; | 918 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), |
580 | else | 919 | "sh-dma%d", new_sh_chan->id); |
581 | #endif | ||
582 | irq = DMTE0_IRQ; | ||
583 | } | ||
584 | |||
585 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | ||
586 | "sh-dmae%d", new_sh_chan->id); | ||
587 | 920 | ||
588 | /* set up channel irq */ | 921 | /* set up channel irq */ |
589 | err = request_irq(irq, &sh_dmae_interrupt, | 922 | err = request_irq(irq, &sh_dmae_interrupt, flags, |
590 | irqflags, new_sh_chan->dev_id, new_sh_chan); | 923 | new_sh_chan->dev_id, new_sh_chan); |
591 | if (err) { | 924 | if (err) { |
592 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " | 925 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " |
593 | "with return %d\n", id, err); | 926 | "with return %d\n", id, err); |
594 | goto err_no_irq; | 927 | goto err_no_irq; |
595 | } | 928 | } |
596 | 929 | ||
597 | /* CHCR register control function */ | ||
598 | new_sh_chan->set_chcr = dmae_set_chcr; | ||
599 | /* DMARS register control function */ | ||
600 | new_sh_chan->set_dmars = dmae_set_dmars; | ||
601 | |||
602 | shdev->chan[id] = new_sh_chan; | 930 | shdev->chan[id] = new_sh_chan; |
603 | return 0; | 931 | return 0; |
604 | 932 | ||
@@ -615,12 +943,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | |||
615 | 943 | ||
616 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { | 944 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { |
617 | if (shdev->chan[i]) { | 945 | if (shdev->chan[i]) { |
618 | struct sh_dmae_chan *shchan = shdev->chan[i]; | 946 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
619 | if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) | 947 | |
620 | free_irq(dmte_irq_map[i], shchan); | 948 | free_irq(sh_chan->irq, sh_chan); |
621 | 949 | ||
622 | list_del(&shchan->common.device_node); | 950 | list_del(&sh_chan->common.device_node); |
623 | kfree(shchan); | 951 | kfree(sh_chan); |
624 | shdev->chan[i] = NULL; | 952 | shdev->chan[i] = NULL; |
625 | } | 953 | } |
626 | } | 954 | } |
@@ -629,83 +957,164 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | |||
629 | 957 | ||
630 | static int __init sh_dmae_probe(struct platform_device *pdev) | 958 | static int __init sh_dmae_probe(struct platform_device *pdev) |
631 | { | 959 | { |
632 | int err = 0, cnt, ecnt; | 960 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; |
633 | unsigned long irqflags = IRQF_DISABLED; | 961 | unsigned long irqflags = IRQF_DISABLED, |
634 | #if defined(CONFIG_CPU_SH4) | 962 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; |
635 | int eirq[] = { DMAE0_IRQ, | 963 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; |
636 | #if defined(DMAE1_IRQ) | 964 | int err, i, irq_cnt = 0, irqres = 0; |
637 | DMAE1_IRQ | ||
638 | #endif | ||
639 | }; | ||
640 | #endif | ||
641 | struct sh_dmae_device *shdev; | 965 | struct sh_dmae_device *shdev; |
966 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | ||
642 | 967 | ||
643 | /* get platform data */ | 968 | /* get platform data */ |
644 | if (!pdev->dev.platform_data) | 969 | if (!pdata || !pdata->channel_num) |
970 | return -ENODEV; | ||
971 | |||
972 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
973 | /* DMARS area is optional, if absent, this controller cannot do slave DMA */ | ||
974 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
975 | /* | ||
976 | * IRQ resources: | ||
977 | * 1. there always must be at least one IRQ IO-resource. On SH4 it is | ||
978 | * the error IRQ, in which case it is the only IRQ in this resource: | ||
979 | * start == end. If it is the only IRQ resource, all channels also | ||
980 | * use the same IRQ. | ||
981 | * 2. DMA channel IRQ resources can be specified one per resource or in | ||
982 | * ranges (start != end) | ||
983 | * 3. iff all events (channels and, optionally, error) on this | ||
984 | * controller use the same IRQ, only one IRQ resource can be | ||
985 | * specified, otherwise there must be one IRQ per channel, even if | ||
986 | * some of them are equal | ||
987 | * 4. if all IRQs on this controller are equal or if some specific IRQs | ||
988 | * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be | ||
989 | * requested with the IRQF_SHARED flag | ||
990 | */ | ||
991 | errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
992 | if (!chan || !errirq_res) | ||
645 | return -ENODEV; | 993 | return -ENODEV; |
646 | 994 | ||
995 | if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { | ||
996 | dev_err(&pdev->dev, "DMAC register region already claimed\n"); | ||
997 | return -EBUSY; | ||
998 | } | ||
999 | |||
1000 | if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { | ||
1001 | dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); | ||
1002 | err = -EBUSY; | ||
1003 | goto ermrdmars; | ||
1004 | } | ||
1005 | |||
1006 | err = -ENOMEM; | ||
647 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); | 1007 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); |
648 | if (!shdev) { | 1008 | if (!shdev) { |
649 | dev_err(&pdev->dev, "No enough memory\n"); | 1009 | dev_err(&pdev->dev, "Not enough memory\n"); |
650 | return -ENOMEM; | 1010 | goto ealloc; |
1011 | } | ||
1012 | |||
1013 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); | ||
1014 | if (!shdev->chan_reg) | ||
1015 | goto emapchan; | ||
1016 | if (dmars) { | ||
1017 | shdev->dmars = ioremap(dmars->start, resource_size(dmars)); | ||
1018 | if (!shdev->dmars) | ||
1019 | goto emapdmars; | ||
651 | } | 1020 | } |
652 | 1021 | ||
653 | /* platform data */ | 1022 | /* platform data */ |
654 | memcpy(&shdev->pdata, pdev->dev.platform_data, | 1023 | shdev->pdata = pdata; |
655 | sizeof(struct sh_dmae_pdata)); | 1024 | |
1025 | pm_runtime_enable(&pdev->dev); | ||
1026 | pm_runtime_get_sync(&pdev->dev); | ||
656 | 1027 | ||
657 | /* reset dma controller */ | 1028 | /* reset dma controller */ |
658 | err = sh_dmae_rst(0); | 1029 | err = sh_dmae_rst(shdev); |
659 | if (err) | 1030 | if (err) |
660 | goto rst_err; | 1031 | goto rst_err; |
661 | 1032 | ||
662 | /* SH7780/85/23 has DMAOR1 */ | ||
663 | if (shdev->pdata.mode & SHDMA_DMAOR1) { | ||
664 | err = sh_dmae_rst(1); | ||
665 | if (err) | ||
666 | goto rst_err; | ||
667 | } | ||
668 | |||
669 | INIT_LIST_HEAD(&shdev->common.channels); | 1033 | INIT_LIST_HEAD(&shdev->common.channels); |
670 | 1034 | ||
671 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | 1035 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); |
1036 | if (dmars) | ||
1037 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | ||
1038 | |||
672 | shdev->common.device_alloc_chan_resources | 1039 | shdev->common.device_alloc_chan_resources |
673 | = sh_dmae_alloc_chan_resources; | 1040 | = sh_dmae_alloc_chan_resources; |
674 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; | 1041 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; |
675 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; | 1042 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; |
676 | shdev->common.device_is_tx_complete = sh_dmae_is_complete; | 1043 | shdev->common.device_is_tx_complete = sh_dmae_is_complete; |
677 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; | 1044 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; |
1045 | |||
1046 | /* Compulsory for DMA_SLAVE fields */ | ||
1047 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; | ||
1048 | shdev->common.device_terminate_all = sh_dmae_terminate_all; | ||
1049 | |||
678 | shdev->common.dev = &pdev->dev; | 1050 | shdev->common.dev = &pdev->dev; |
1051 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | ||
1052 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; | ||
679 | 1053 | ||
680 | #if defined(CONFIG_CPU_SH4) | 1054 | #if defined(CONFIG_CPU_SH4) |
681 | /* Non Mix IRQ mode SH7722/SH7730 etc... */ | 1055 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); |
682 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 1056 | |
1057 | if (!chanirq_res) | ||
1058 | chanirq_res = errirq_res; | ||
1059 | else | ||
1060 | irqres++; | ||
1061 | |||
1062 | if (chanirq_res == errirq_res || | ||
1063 | (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) | ||
683 | irqflags = IRQF_SHARED; | 1064 | irqflags = IRQF_SHARED; |
684 | eirq[0] = DMTE0_IRQ; | 1065 | |
685 | #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) | 1066 | errirq = errirq_res->start; |
686 | eirq[1] = DMTE6_IRQ; | 1067 | |
687 | #endif | 1068 | err = request_irq(errirq, sh_dmae_err, irqflags, |
1069 | "DMAC Address Error", shdev); | ||
1070 | if (err) { | ||
1071 | dev_err(&pdev->dev, | ||
1072 | "DMA failed requesting irq #%d, error %d\n", | ||
1073 | errirq, err); | ||
1074 | goto eirq_err; | ||
688 | } | 1075 | } |
689 | 1076 | ||
690 | for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) { | 1077 | #else |
691 | err = request_irq(eirq[ecnt], sh_dmae_err, | 1078 | chanirq_res = errirq_res; |
692 | irqflags, "DMAC Address Error", shdev); | 1079 | #endif /* CONFIG_CPU_SH4 */ |
693 | if (err) { | 1080 | |
694 | dev_err(&pdev->dev, "DMA device request_irq" | 1081 | if (chanirq_res->start == chanirq_res->end && |
695 | "error (irq %d) with return %d\n", | 1082 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { |
696 | eirq[ecnt], err); | 1083 | /* Special case - all multiplexed */ |
697 | goto eirq_err; | 1084 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { |
1085 | chan_irq[irq_cnt] = chanirq_res->start; | ||
1086 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
698 | } | 1087 | } |
1088 | } else { | ||
1089 | do { | ||
1090 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { | ||
1091 | if ((errirq_res->flags & IORESOURCE_BITS) == | ||
1092 | IORESOURCE_IRQ_SHAREABLE) | ||
1093 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
1094 | else | ||
1095 | chan_flag[irq_cnt] = IRQF_DISABLED; | ||
1096 | dev_dbg(&pdev->dev, | ||
1097 | "Found IRQ %d for channel %d\n", | ||
1098 | i, irq_cnt); | ||
1099 | chan_irq[irq_cnt++] = i; | ||
1100 | } | ||
1101 | chanirq_res = platform_get_resource(pdev, | ||
1102 | IORESOURCE_IRQ, ++irqres); | ||
1103 | } while (irq_cnt < pdata->channel_num && chanirq_res); | ||
699 | } | 1104 | } |
700 | #endif /* CONFIG_CPU_SH4 */ | 1105 | |
1106 | if (irq_cnt < pdata->channel_num) | ||
1107 | goto eirqres; | ||
701 | 1108 | ||
702 | /* Create DMA Channel */ | 1109 | /* Create DMA Channel */ |
703 | for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) { | 1110 | for (i = 0; i < pdata->channel_num; i++) { |
704 | err = sh_dmae_chan_probe(shdev, cnt); | 1111 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); |
705 | if (err) | 1112 | if (err) |
706 | goto chan_probe_err; | 1113 | goto chan_probe_err; |
707 | } | 1114 | } |
708 | 1115 | ||
1116 | pm_runtime_put(&pdev->dev); | ||
1117 | |||
709 | platform_set_drvdata(pdev, shdev); | 1118 | platform_set_drvdata(pdev, shdev); |
710 | dma_async_device_register(&shdev->common); | 1119 | dma_async_device_register(&shdev->common); |
711 | 1120 | ||
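The IRQ-resource conventions spelled out in the probe comment above allow either a single multiplexed IRQ for everything, or an error IRQ followed by per-channel IRQs or IRQ ranges. Hypothetical board code illustrating the two layouts; every address and IRQ number below is made up, and the arrays would be attached to the sh-dma-engine platform device together with its struct sh_dmae_pdata:

/* Hypothetical board code, not part of this patch */
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Layout 1: one shared IRQ for the error interrupt and all channels */
static struct resource dmac0_resources[] = {
	{
		.start	= 0xfe008020,		/* channel register block */
		.end	= 0xfe00808f,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 0xfe009000,		/* DMARS block (optional) */
		.end	= 0xfe00900b,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 34,			/* single multiplexed IRQ */
		.end	= 34,
		.flags	= IORESOURCE_IRQ,
	},
};

/* Layout 2: error IRQ first, then one IRQ range covering the channels */
static struct resource dmac1_resources[] = {
	{
		.start	= 0xfe018020,
		.end	= 0xfe01808f,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 66,			/* error IRQ */
		.end	= 66,
		.flags	= IORESOURCE_IRQ,
	}, {
		.start	= 40,			/* channels 0..5 (start != end) */
		.end	= 45,
		.flags	= IORESOURCE_IRQ,
	},
};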
@@ -713,13 +1122,24 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
713 | 1122 | ||
714 | chan_probe_err: | 1123 | chan_probe_err: |
715 | sh_dmae_chan_remove(shdev); | 1124 | sh_dmae_chan_remove(shdev); |
716 | 1125 | eirqres: | |
1126 | #if defined(CONFIG_CPU_SH4) | ||
1127 | free_irq(errirq, shdev); | ||
717 | eirq_err: | 1128 | eirq_err: |
718 | for (ecnt-- ; ecnt >= 0; ecnt--) | 1129 | #endif |
719 | free_irq(eirq[ecnt], shdev); | ||
720 | |||
721 | rst_err: | 1130 | rst_err: |
1131 | pm_runtime_put(&pdev->dev); | ||
1132 | if (dmars) | ||
1133 | iounmap(shdev->dmars); | ||
1134 | emapdmars: | ||
1135 | iounmap(shdev->chan_reg); | ||
1136 | emapchan: | ||
722 | kfree(shdev); | 1137 | kfree(shdev); |
1138 | ealloc: | ||
1139 | if (dmars) | ||
1140 | release_mem_region(dmars->start, resource_size(dmars)); | ||
1141 | ermrdmars: | ||
1142 | release_mem_region(chan->start, resource_size(chan)); | ||
723 | 1143 | ||
724 | return err; | 1144 | return err; |
725 | } | 1145 | } |
@@ -727,36 +1147,39 @@ rst_err: | |||
727 | static int __exit sh_dmae_remove(struct platform_device *pdev) | 1147 | static int __exit sh_dmae_remove(struct platform_device *pdev) |
728 | { | 1148 | { |
729 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 1149 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
1150 | struct resource *res; | ||
1151 | int errirq = platform_get_irq(pdev, 0); | ||
730 | 1152 | ||
731 | dma_async_device_unregister(&shdev->common); | 1153 | dma_async_device_unregister(&shdev->common); |
732 | 1154 | ||
733 | if (shdev->pdata.mode & SHDMA_MIX_IRQ) { | 1155 | if (errirq > 0) |
734 | free_irq(DMTE0_IRQ, shdev); | 1156 | free_irq(errirq, shdev); |
735 | #if defined(DMTE6_IRQ) | ||
736 | free_irq(DMTE6_IRQ, shdev); | ||
737 | #endif | ||
738 | } | ||
739 | 1157 | ||
740 | /* channel data remove */ | 1158 | /* channel data remove */ |
741 | sh_dmae_chan_remove(shdev); | 1159 | sh_dmae_chan_remove(shdev); |
742 | 1160 | ||
743 | if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) { | 1161 | pm_runtime_disable(&pdev->dev); |
744 | free_irq(DMAE0_IRQ, shdev); | 1162 | |
745 | #if defined(DMAE1_IRQ) | 1163 | if (shdev->dmars) |
746 | free_irq(DMAE1_IRQ, shdev); | 1164 | iounmap(shdev->dmars); |
747 | #endif | 1165 | iounmap(shdev->chan_reg); |
748 | } | 1166 | |
749 | kfree(shdev); | 1167 | kfree(shdev); |
750 | 1168 | ||
1169 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1170 | if (res) | ||
1171 | release_mem_region(res->start, resource_size(res)); | ||
1172 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1173 | if (res) | ||
1174 | release_mem_region(res->start, resource_size(res)); | ||
1175 | |||
751 | return 0; | 1176 | return 0; |
752 | } | 1177 | } |
753 | 1178 | ||
754 | static void sh_dmae_shutdown(struct platform_device *pdev) | 1179 | static void sh_dmae_shutdown(struct platform_device *pdev) |
755 | { | 1180 | { |
756 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 1181 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
757 | sh_dmae_ctl_stop(0); | 1182 | sh_dmae_ctl_stop(shdev); |
758 | if (shdev->pdata.mode & SHDMA_DMAOR1) | ||
759 | sh_dmae_ctl_stop(1); | ||
760 | } | 1183 | } |
761 | 1184 | ||
762 | static struct platform_driver sh_dmae_driver = { | 1185 | static struct platform_driver sh_dmae_driver = { |