path: root/arch/arm/mach-tegra/dma.c
Diffstat (limited to 'arch/arm/mach-tegra/dma.c')
-rw-r--r--  arch/arm/mach-tegra/dma.c | 910
1 file changed, 660 insertions(+), 250 deletions(-)
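
This patch reworks the Tegra APB DMA driver's client API: tegra_dma_allocate_channel() now takes a printf-style client name (surfaced in debugfs and the allocation dump), the old continuous mode is split into TEGRA_DMA_MODE_CONTINUOUS_SINGLE and TEGRA_DMA_MODE_CONTINUOUS_DOUBLE with per-mode ISR handlers, and completion callbacks are dispatched from a single hard-IRQ handler. A minimal sketch of a client of the new API follows; the function names, buffer, and request selector are illustrative assumptions, not part of the patch.

    /* Hypothetical client of the reworked API (sketch only, not in this patch). */
    static struct tegra_dma_channel *dma_ch;
    static struct tegra_dma_req dma_req;

    static void buf_done(struct tegra_dma_req *req)
    {
            pr_info("DMA moved %d bytes\n", req->bytes_transferred);
    }

    static int example_client_init(unsigned long buf_phys, int len)
    {
            /* Allocation now records a client name for diagnostics. */
            dma_ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_CONTINUOUS_DOUBLE,
                                                "example_rx_%d", 0);
            if (!dma_ch)
                    return -ENODEV;

            dma_req.to_memory = 1;                   /* APB FIFO -> memory */
            dma_req.dest_addr = buf_phys;            /* must be 4-byte aligned */
            dma_req.size = len;                      /* 8-byte multiple in double mode */
            dma_req.req_sel = TEGRA_DMA_REQ_SEL_SPI; /* sample request selector */
            dma_req.complete = buf_done;             /* full-buffer callback */
            dma_req.threshold = buf_done;            /* half-buffer callback */

            return tegra_dma_enqueue_req(dma_ch, &dma_req);
    }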
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index f4ef5eb317b..35499916e2b 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -3,7 +3,7 @@
  *
  * System DMA driver for NVIDIA Tegra SoCs
  *
- * Copyright (c) 2008-2009, NVIDIA Corporation.
+ * Copyright (c) 2008-2012, NVIDIA Corporation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -28,10 +28,11 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
+#include <linux/syscore_ops.h>
 #include <mach/dma.h>
 #include <mach/irqs.h>
 #include <mach/iomap.h>
-#include <mach/suspend.h>
+#include <mach/clk.h>
 
 #define APB_DMA_GEN			0x000
 #define GEN_ENABLE			(1<<31)
@@ -50,12 +51,10 @@
 #define CSR_ONCE			(1<<27)
 #define CSR_FLOW			(1<<21)
 #define CSR_REQ_SEL_SHIFT		16
-#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
-#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
 #define CSR_WCOUNT_SHIFT		2
 #define CSR_WCOUNT_MASK		0xFFFC
 
 #define APB_DMA_CHAN_STA		0x004
 #define STA_BUSY			(1<<31)
 #define STA_ISE_EOC			(1<<30)
 #define STA_HALT			(1<<29)
@@ -63,9 +62,9 @@
 #define STA_COUNT_SHIFT		2
 #define STA_COUNT_MASK			0xFFFC
 
 #define APB_DMA_CHAN_AHB_PTR		0x010
 
 #define APB_DMA_CHAN_AHB_SEQ		0x014
 #define AHB_SEQ_INTR_ENB		(1<<31)
 #define AHB_SEQ_BUS_WIDTH_SHIFT	28
 #define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
@@ -83,9 +82,9 @@
 #define AHB_SEQ_WRAP_SHIFT		16
 #define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)
 
 #define APB_DMA_CHAN_APB_PTR		0x018
 
 #define APB_DMA_CHAN_APB_SEQ		0x01c
 #define APB_SEQ_BUS_WIDTH_SHIFT	28
 #define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
 #define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
@@ -97,21 +96,35 @@
 #define APB_SEQ_WRAP_SHIFT		16
 #define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)
 
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
 #define TEGRA_SYSTEM_DMA_CH_NR		16
+#else
+#define TEGRA_SYSTEM_DMA_CH_NR		32
+#endif
 #define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
 #define TEGRA_SYSTEM_DMA_CH_MIN	0
 #define TEGRA_SYSTEM_DMA_CH_MAX	\
 	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
 
-#define NV_DMA_MAX_TRASFER_SIZE 0x10000
+/* Maximum dma transfer size */
+#define TEGRA_DMA_MAX_TRANSFER_SIZE	0x10000
+
+static struct clk *dma_clk;
 
-const unsigned int ahb_addr_wrap_table[8] = {
+static const unsigned int ahb_addr_wrap_table[8] = {
 	0, 32, 64, 128, 256, 512, 1024, 2048
 };
 
-const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};
+static const unsigned int apb_addr_wrap_table[8] = {
+	0, 1, 2, 4, 8, 16, 32, 64
+};
+
+static const unsigned int bus_width_table[5] = {
+	8, 16, 32, 64, 128
+};
 
-const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
+static void __iomem *general_dma_addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
+typedef void (*dma_isr_handler)(struct tegra_dma_channel *ch);
 
 #define TEGRA_DMA_NAME_SIZE 16
 struct tegra_dma_channel {
@@ -119,45 +132,38 @@ struct tegra_dma_channel {
 	int id;
 	spinlock_t lock;
 	char name[TEGRA_DMA_NAME_SIZE];
+	char client_name[TEGRA_DMA_NAME_SIZE];
 	void __iomem *addr;
 	int mode;
 	int irq;
-	int req_transfer_count;
+	dma_callback callback;
+	struct tegra_dma_req *cb_req;
+	dma_isr_handler isr_handler;
 };
 
 #define NV_DMA_MAX_CHANNELS 32
 
 static bool tegra_dma_initialized;
 static DEFINE_MUTEX(tegra_dma_lock);
+static DEFINE_SPINLOCK(enable_lock);
 
 static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
 static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
 
 static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req);
-static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
+static bool tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req);
-static void tegra_dma_stop(struct tegra_dma_channel *ch);
+static void handle_oneshot_dma(struct tegra_dma_channel *ch);
+static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch);
+static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch);
 
 void tegra_dma_flush(struct tegra_dma_channel *ch)
 {
 }
 EXPORT_SYMBOL(tegra_dma_flush);
 
-void tegra_dma_dequeue(struct tegra_dma_channel *ch)
-{
-	struct tegra_dma_req *req;
-
-	if (tegra_dma_is_empty(ch))
-		return;
-
-	req = list_entry(ch->list.next, typeof(*req), node);
-
-	tegra_dma_dequeue_req(ch, req);
-	return;
-}
-
-void tegra_dma_stop(struct tegra_dma_channel *ch)
+static void tegra_dma_stop(struct tegra_dma_channel *ch)
 {
 	u32 csr;
 	u32 status;
@@ -176,36 +182,193 @@ void tegra_dma_stop(struct tegra_dma_channel *ch)
 
 int tegra_dma_cancel(struct tegra_dma_channel *ch)
 {
-	u32 csr;
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	while (!list_empty(&ch->list))
 		list_del(ch->list.next);
 
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_REQ_SEL_MASK;
-	csr |= CSR_REQ_SEL_INVALID;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
 	tegra_dma_stop(ch);
 
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
 	return 0;
 }
+EXPORT_SYMBOL(tegra_dma_cancel);
+
+static void pause_dma(bool wait_for_burst_complete)
+{
+	spin_lock(&enable_lock);
+	writel(0, general_dma_addr + APB_DMA_GEN);
+	if (wait_for_burst_complete)
+		udelay(20);
+}
+
+static void resume_dma(void)
+{
+	writel(GEN_ENABLE, general_dma_addr + APB_DMA_GEN);
+	spin_unlock(&enable_lock);
+}
+
+static void start_head_req(struct tegra_dma_channel *ch)
+{
+	struct tegra_dma_req *head_req;
+	if (!list_empty(&ch->list)) {
+		head_req = list_entry(ch->list.next, typeof(*head_req), node);
+		tegra_dma_update_hw(ch, head_req);
+	}
+}
+
+static void configure_next_req(struct tegra_dma_channel *ch,
+	struct tegra_dma_req *hreq)
+{
+	struct tegra_dma_req *next_req;
+	if (!list_is_last(&hreq->node, &ch->list)) {
+		next_req = list_entry(hreq->node.next, typeof(*next_req), node);
+		tegra_dma_update_hw_partial(ch, next_req);
+	}
+}
+
+static inline unsigned int get_req_xfer_word_count(
+	struct tegra_dma_channel *ch, struct tegra_dma_req *req)
+{
+	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
+		return req->size >> 3;
+	else
+		return req->size >> 2;
+}
+
+static int get_current_xferred_count(struct tegra_dma_channel *ch,
+	struct tegra_dma_req *req, unsigned long status)
+{
+	int req_transfer_count;
+	req_transfer_count = get_req_xfer_word_count(ch, req) << 2;
+	return req_transfer_count - ((status & STA_COUNT_MASK) + 4);
+}
+
+static void tegra_dma_abort_req(struct tegra_dma_channel *ch,
+	struct tegra_dma_req *req, const char *warn_msg)
+{
+	unsigned long status = readl(ch->addr + APB_DMA_CHAN_STA);
+
+	/*
+	 * Check whether an interrupt is pending.  This function is called
+	 * from the ISR, so there is no need to invoke the ISR handler
+	 * again; just update bytes_transferred.
+	 */
+	if (status & STA_ISE_EOC)
+		req->bytes_transferred += get_req_xfer_word_count(ch, req) << 2;
+	tegra_dma_stop(ch);
+
+	req->bytes_transferred += get_current_xferred_count(ch, req, status);
+	req->status = -TEGRA_DMA_REQ_ERROR_STOPPED;
+	if (warn_msg)
+		WARN(1, KERN_WARNING "%s\n", warn_msg);
+	start_head_req(ch);
+}
+
+static void handle_continuous_head_request(struct tegra_dma_channel *ch,
+	struct tegra_dma_req *last_req)
+{
+	struct tegra_dma_req *hreq = NULL;
+
+	if (list_empty(&ch->list)) {
+		tegra_dma_abort_req(ch, last_req, NULL);
+		return;
+	}
+
+	/*
+	 * The head request on the list should already be in flight.
+	 * If it is not, the request was queued too late, so abort the
+	 * DMA and start the next request immediately.
+	 */
+	hreq = list_entry(ch->list.next, typeof(*hreq), node);
+	if (hreq->status != TEGRA_DMA_REQ_INFLIGHT) {
+		tegra_dma_abort_req(ch, last_req, "Req was not queued on time");
+		return;
+	}
+
+	/* Configure next request in single buffer mode */
+	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE)
+		configure_next_req(ch, hreq);
+}
+
+static unsigned int get_channel_status(struct tegra_dma_channel *ch,
+	struct tegra_dma_req *req, bool is_stop_dma)
+{
+	unsigned int status;
+
+	if (is_stop_dma) {
+		/* STOP the DMA and get the transfer count.
+		 * Getting the transfer count is tricky.
+		 * - Globally disable DMA on all channels
+		 * - Read the channel's status register to know the number
+		 *   of pending bytes to be transferred.
+		 * - Stop the DMA channel
+		 * - Globally re-enable DMA to resume other transfers
+		 */
+		pause_dma(true);
+		status = readl(ch->addr + APB_DMA_CHAN_STA);
+		tegra_dma_stop(ch);
+		resume_dma();
+		if (status & STA_ISE_EOC) {
+			pr_err("Got DMA interrupt here, clearing\n");
+			writel(status, ch->addr + APB_DMA_CHAN_STA);
+		}
+		req->status = TEGRA_DMA_REQ_ERROR_ABORTED;
+	} else {
+		status = readl(ch->addr + APB_DMA_CHAN_STA);
+	}
+	return status;
+}
+
+/* should be called with the channel lock held */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+	struct tegra_dma_req *req, unsigned int status)
+{
+	unsigned int to_transfer;
+	unsigned int req_transfer_count;
+	unsigned int bytes_transferred;
+
+	to_transfer = ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
+	req_transfer_count = get_req_xfer_word_count(ch, req);
+	bytes_transferred = req_transfer_count;
+
+	if (status & STA_BUSY)
+		bytes_transferred -= to_transfer;
+
+	/*
+	 * In continuous transfer mode, DMA only tracks the count of the
+	 * half DMA buffer. So, if the DMA already finished half the DMA
+	 * then add the half buffer to the completed count.
+	 */
+	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
+		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+			bytes_transferred += req_transfer_count;
+
+	if (status & STA_ISE_EOC)
+		bytes_transferred += req_transfer_count;
+
+	bytes_transferred *= 4;
+
+	return bytes_transferred;
+}
 
 int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *_req)
 {
-	unsigned int csr;
-	unsigned int status;
 	struct tegra_dma_req *req = NULL;
 	int found = 0;
+	unsigned int status;
 	unsigned long irq_flags;
-	int to_transfer;
-	int req_transfer_count;
+	int stop = 0;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
+
+	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
+		stop = 1;
+
 	list_for_each_entry(req, &ch->list, node) {
 		if (req == _req) {
 			list_del(&req->node);
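
get_req_xfer_word_count() and dma_active_count() above do all accounting in 32-bit words: the hardware is programmed with size>>2 words (size>>3 in double-buffered mode, where only half the buffer is in flight), and the completed count is derived from the words still pending, a finished half buffer, and any unserviced end-of-count interrupt. A standalone restatement of that arithmetic (userspace, names assumed) for sanity-checking:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int active_bytes(unsigned int req_size, unsigned int words_left,
                                     bool busy, bool dbl_buf, bool half_done,
                                     bool eoc_pending)
    {
            unsigned int req_words = req_size >> (dbl_buf ? 3 : 2);
            unsigned int done = req_words;

            if (busy)
                    done -= words_left;     /* transfer still in flight */
            if (dbl_buf && half_done)
                    done += req_words;      /* first half already completed */
            if (eoc_pending)
                    done += req_words;      /* unserviced end-of-count irq */
            return done * 4;                /* words -> bytes */
    }

    int main(void)
    {
            /* 4096-byte double-buffered req, 100 words pending, first half
             * done: (512 - 100 + 512) * 4 = 3696 bytes transferred. */
            printf("%u\n", active_bytes(4096, 100, true, true, true, false));
            return 0;
    }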
@@ -215,50 +378,15 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 	}
 	if (!found) {
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return 0;
+		return -ENOENT;
 	}
 
-	/* STOP the DMA and get the transfer count.
-	 * Getting the transfer count is tricky.
-	 * - Change the source selector to invalid to stop the DMA from
-	 *   FIFO to memory.
-	 * - Read the status register to know the number of pending
-	 *   bytes to be transferred.
-	 * - Finally stop or program the DMA to the next buffer in the
-	 *   list.
-	 */
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_REQ_SEL_MASK;
-	csr |= CSR_REQ_SEL_INVALID;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	/* Get the transfer count */
-	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
-	req_transfer_count = ch->req_transfer_count;
-	req_transfer_count += 1;
-	to_transfer += 1;
-
-	req->bytes_transferred = req_transfer_count;
-
-	if (status & STA_BUSY)
-		req->bytes_transferred -= to_transfer;
-
-	/* In continuous transfer mode, DMA only tracks the count of the
-	 * half DMA buffer. So, if the DMA already finished half the DMA
-	 * then add the half buffer to the completed count.
-	 *
-	 * FIXME: There can be a race here. What if the req to
-	 * dequue happens at the same time as the DMA just moved to
-	 * the new buffer and SW didn't yet received the interrupt?
-	 */
-	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
-			req->bytes_transferred += req_transfer_count;
+	if (!stop)
+		goto skip_status;
 
-	req->bytes_transferred *= 4;
+	status = get_channel_status(ch, req, true);
+	req->bytes_transferred = dma_active_count(ch, req, status);
 
-	tegra_dma_stop(ch);
 	if (!list_empty(&ch->list)) {
 		/* if the list is not empty, queue the next request */
 		struct tegra_dma_req *next_req;
@@ -266,6 +394,7 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 			typeof(*next_req), node);
 		tegra_dma_update_hw(ch, next_req);
 	}
+skip_status:
 	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
 
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -309,49 +438,124 @@ bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
 }
 EXPORT_SYMBOL(tegra_dma_is_req_inflight);
 
+int tegra_dma_get_transfer_count(struct tegra_dma_channel *ch,
+	struct tegra_dma_req *req)
+{
+	unsigned int status;
+	unsigned long irq_flags;
+	int bytes_transferred = 0;
+
+	if (IS_ERR_OR_NULL(ch))
+		BUG();
+
+	spin_lock_irqsave(&ch->lock, irq_flags);
+
+	if (list_entry(ch->list.next, struct tegra_dma_req, node) != req) {
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		pr_debug("The dma request is not the head req\n");
+		return req->bytes_transferred;
+	}
+
+	if (req->status != TEGRA_DMA_REQ_INFLIGHT) {
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		pr_debug("The dma request is not running\n");
+		return req->bytes_transferred;
+	}
+
+	status = get_channel_status(ch, req, false);
+	bytes_transferred = dma_active_count(ch, req, status);
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
+	return bytes_transferred;
+}
+EXPORT_SYMBOL(tegra_dma_get_transfer_count);
+
 int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req)
 {
 	unsigned long irq_flags;
 	struct tegra_dma_req *_req;
 	int start_dma = 0;
+	struct tegra_dma_req *hreq, *hnreq;
 
-	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
+	if (req->size > TEGRA_DMA_MAX_TRANSFER_SIZE ||
 	    req->source_addr & 0x3 || req->dest_addr & 0x3) {
 		pr_err("Invalid DMA request for channel %d\n", ch->id);
 		return -EINVAL;
 	}
 
+	if ((req->size & 0x3) ||
+	    ((ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE) &&
+	     (req->size & 0x7))) {
+		pr_err("Invalid DMA request size 0x%08x for channel %d\n",
+			req->size, ch->id);
+		return -EINVAL;
+	}
+
 	spin_lock_irqsave(&ch->lock, irq_flags);
 
 	list_for_each_entry(_req, &ch->list, node) {
 		if (req == _req) {
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			return -EEXIST;
 		}
 	}
 
 	req->bytes_transferred = 0;
 	req->status = 0;
-	req->buffer_status = 0;
+	/* STATUS_EMPTY just means the DMA hasn't processed the buf yet. */
+	req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_EMPTY;
 	if (list_empty(&ch->list))
 		start_dma = 1;
 
 	list_add_tail(&req->node, &ch->list);
 
-	if (start_dma)
+	if (start_dma) {
 		tegra_dma_update_hw(ch, req);
+	} else {
+		/*
+		 * Check to see if this request needs to be configured
+		 * immediately in continuous mode.
+		 */
+		if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
+			goto end;
+
+		hreq = list_entry(ch->list.next, typeof(*hreq), node);
+		hnreq = list_entry(hreq->node.next, typeof(*hnreq), node);
+		if (hnreq != req)
+			goto end;
+
+		if ((ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE) &&
+		    (req->buffer_status != TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL))
+			goto end;
+
+		/* Need to configure the new request now */
+		tegra_dma_update_hw_partial(ch, req);
+	}
 
+end:
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
-
 	return 0;
 }
 EXPORT_SYMBOL(tegra_dma_enqueue_req);
 
-struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
+static void tegra_dma_dump_channel_usage(void)
+{
+	int i;
+	pr_info("DMA channel allocation dump:\n");
+	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
+		struct tegra_dma_channel *ch = &dma_channels[i];
+		pr_warn("dma %d used by %s\n", i, ch->client_name);
+	}
+	return;
+}
+
+struct tegra_dma_channel *tegra_dma_allocate_channel(int mode,
+	const char namefmt[], ...)
 {
 	int channel;
 	struct tegra_dma_channel *ch = NULL;
+	va_list args;
+	dma_isr_handler isr_handler = NULL;
 
 	if (WARN_ON(!tegra_dma_initialized))
 		return NULL;
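
tegra_dma_enqueue_req() now enforces, besides the existing 64 KiB cap and word-aligned addresses, that the request size is a multiple of 4 bytes (8 bytes in double-buffered continuous mode, so that each half stays word-aligned). A minimal userspace restatement of those checks (names assumed):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_XFER 0x10000        /* TEGRA_DMA_MAX_TRANSFER_SIZE */

    static bool req_valid(uint32_t src, uint32_t dst, uint32_t size, bool dbl_buf)
    {
            if (size > MAX_XFER || (src & 0x3) || (dst & 0x3))
                    return false;   /* oversized or unaligned address */
            if (size & 0x3)
                    return false;   /* size must be a word multiple */
            if (dbl_buf && (size & 0x7))
                    return false;   /* halves must stay word multiples */
            return true;
    }

    int main(void)
    {
            /* prints "1 0": 4096 is valid, 4100 breaks the 8-byte rule */
            printf("%d %d\n", req_valid(0x1000, 0x2000, 4096, true),
                   req_valid(0x1000, 0x2000, 4100, true));
            return 0;
    }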
@@ -364,12 +568,32 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
 	} else {
 		channel = find_first_zero_bit(channel_usage,
 			ARRAY_SIZE(dma_channels));
-		if (channel >= ARRAY_SIZE(dma_channels))
+		if (channel >= ARRAY_SIZE(dma_channels)) {
+			tegra_dma_dump_channel_usage();
 			goto out;
+		}
 	}
 
+	if (mode & TEGRA_DMA_MODE_ONESHOT)
+		isr_handler = handle_oneshot_dma;
+	else if (mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
+		isr_handler = handle_continuous_dbl_dma;
+	else if (mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE)
+		isr_handler = handle_continuous_sngl_dma;
+	else
+		pr_err("Bad channel mode for DMA ISR handler\n");
+
+	if (!isr_handler)
+		goto out;
+
 	__set_bit(channel, channel_usage);
 	ch = &dma_channels[channel];
 	ch->mode = mode;
+	ch->isr_handler = isr_handler;
+	va_start(args, namefmt);
+	vsnprintf(ch->client_name, sizeof(ch->client_name),
+		namefmt, args);
+	va_end(args);
 
 out:
 	mutex_unlock(&tegra_dma_lock);
@@ -384,15 +608,23 @@ void tegra_dma_free_channel(struct tegra_dma_channel *ch)
 	tegra_dma_cancel(ch);
 	mutex_lock(&tegra_dma_lock);
 	__clear_bit(ch->id, channel_usage);
+	memset(ch->client_name, 0, sizeof(ch->client_name));
+	ch->isr_handler = NULL;
+	ch->callback = NULL;
+	ch->cb_req = NULL;
 	mutex_unlock(&tegra_dma_lock);
 }
 EXPORT_SYMBOL(tegra_dma_free_channel);
 
-static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
+static bool tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req)
 {
 	u32 apb_ptr;
 	u32 ahb_ptr;
+	u32 csr;
+	unsigned long status;
+	unsigned int req_transfer_count;
+	bool configure = false;
 
 	if (req->to_memory) {
 		apb_ptr = req->source_addr;
@@ -401,11 +633,44 @@ static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
 		apb_ptr = req->dest_addr;
 		ahb_ptr = req->source_addr;
 	}
+
+	/*
+	 * The DMA controller loads the new configuration for the next
+	 * transfer after the last burst of the current transfer completes.
+	 * If the EOC interrupt status is not set, the last burst has not
+	 * completed yet and it is safe to reprogram here.
+	 * If the EOC status is already set, the interrupt handler must
+	 * load the new configuration after aborting the current DMA.
+	 */
+	pause_dma(false);
+	status = readl(ch->addr + APB_DMA_CHAN_STA);
+
+	/*
+	 * If an interrupt is pending then do nothing, as the ISR will
+	 * handle the programming of the new request.
+	 */
+	if (status & STA_ISE_EOC) {
+		pr_warn("%s(): "
+			"Skipping new configuration as interrupt is pending\n",
+			__func__);
+		goto exit_config;
+	}
+
+	/* Safe to program new configuration */
 	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
 	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
 
+	req_transfer_count = get_req_xfer_word_count(ch, req);
+	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
+	csr &= ~CSR_WCOUNT_MASK;
+	csr |= (req_transfer_count - 1) << CSR_WCOUNT_SHIFT;
+	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
 	req->status = TEGRA_DMA_REQ_INFLIGHT;
-	return;
+	configure = true;
+
+exit_config:
+	resume_dma();
+	return configure;
 }
 
 static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
@@ -416,6 +681,7 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 	int ahb_bus_width;
 	int apb_bus_width;
 	int index;
+	unsigned int req_transfer_count;
 
 	u32 ahb_seq;
 	u32 apb_seq;
@@ -424,27 +690,72 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 	u32 csr;
 
 	csr = CSR_IE_EOC | CSR_FLOW;
-	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
+	ahb_seq = AHB_SEQ_INTR_ENB;
+
+	switch (req->req_sel) {
+	case TEGRA_DMA_REQ_SEL_SL2B1:
+	case TEGRA_DMA_REQ_SEL_SL2B2:
+	case TEGRA_DMA_REQ_SEL_SL2B3:
+	case TEGRA_DMA_REQ_SEL_SL2B4:
+#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	case TEGRA_DMA_REQ_SEL_SL2B5:
+	case TEGRA_DMA_REQ_SEL_SL2B6:
+	case TEGRA_DMA_REQ_SEL_APBIF_CH0:
+	case TEGRA_DMA_REQ_SEL_APBIF_CH1:
+	case TEGRA_DMA_REQ_SEL_APBIF_CH2:
+	case TEGRA_DMA_REQ_SEL_APBIF_CH3:
+#endif
+	case TEGRA_DMA_REQ_SEL_SPI:
+		/* The DTV interface has a fixed burst size of 4 */
+		if (req->fixed_burst_size) {
+			ahb_seq |= AHB_SEQ_BURST_4;
+			break;
+		}
+		/* For SPI/SLINK the burst size is based on the transfer
+		 * size: a multiple of 32 bytes uses an 8-word (8x32bit)
+		 * burst, a multiple of 16 bytes a 4-word (4x32bit) burst,
+		 * and anything else a 1-word (1x32bit) burst. */
+		if (req->size & 0xF)
+			ahb_seq |= AHB_SEQ_BURST_1;
+		else if ((req->size >> 4) & 0x1)
+			ahb_seq |= AHB_SEQ_BURST_4;
+		else
+			ahb_seq |= AHB_SEQ_BURST_8;
+		break;
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+	case TEGRA_DMA_REQ_SEL_I2S_2:
+	case TEGRA_DMA_REQ_SEL_I2S_1:
+	case TEGRA_DMA_REQ_SEL_SPD_I:
+	case TEGRA_DMA_REQ_SEL_UI_I:
+	case TEGRA_DMA_REQ_SEL_I2S2_2:
+	case TEGRA_DMA_REQ_SEL_I2S2_1:
+		/* For ARCH_2x the I2S/SPDIF burst size is 4 words */
+		ahb_seq |= AHB_SEQ_BURST_4;
+		break;
+#endif
+
+	default:
+		ahb_seq |= AHB_SEQ_BURST_1;
+		break;
+	}
 
 	apb_seq = 0;
 
 	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
 
-	/* One shot mode is always single buffered,
-	 * continuous mode is always double buffered
-	 * */
-	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
-		csr |= CSR_ONCE;
-		ch->req_transfer_count = (req->size >> 2) - 1;
-	} else {
-		ahb_seq |= AHB_SEQ_DBL_BUF;
-
-		/* In double buffered mode, we set the size to half the
-		 * requested size and interrupt when half the buffer
-		 * is full */
-		ch->req_transfer_count = (req->size >> 3) - 1;
-	}
-
-	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
+	req_transfer_count = get_req_xfer_word_count(ch, req);
+
+	/* One shot mode is always single buffered. Continuous mode could
+	 * support either.
+	 */
+	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
+		csr |= CSR_ONCE;
+
+	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
+		ahb_seq |= AHB_SEQ_DBL_BUF;
+
+	csr |= (req_transfer_count - 1) << CSR_WCOUNT_SHIFT;
 
 	if (req->to_memory) {
 		apb_ptr = req->source_addr;
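
The SPI/SLINK burst heuristic above picks the largest burst that evenly divides the transfer: 8 words for 32-byte multiples, 4 words for 16-byte multiples, otherwise 1 word. A standalone restatement for sanity-checking (userspace, illustrative only):

    #include <stdio.h>

    static int burst_words(unsigned int size)
    {
            if (size & 0xF)
                    return 1;       /* not a 16-byte multiple */
            else if ((size >> 4) & 0x1)
                    return 4;       /* 16-byte but not 32-byte multiple */
            else
                    return 8;       /* 32-byte multiple */
    }

    int main(void)
    {
            /* 24 -> 1, 48 -> 4, 64 -> 8 */
            printf("%d %d %d\n", burst_words(24), burst_words(48),
                   burst_words(64));
            return 0;
    }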
@@ -518,159 +829,163 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 {
 	struct tegra_dma_req *req;
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	if (list_empty(&ch->list)) {
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return;
-	}
+	req = list_entry(ch->list.next, typeof(*req), node);
+	list_del(&req->node);
+	req->bytes_transferred += req->size;
+	req->status = TEGRA_DMA_REQ_SUCCESS;
+
+	ch->callback = req->complete;
+	ch->cb_req = req;
+
+	start_head_req(ch);
+	return;
+}
+
+static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch)
+{
+	struct tegra_dma_req *req;
 
 	req = list_entry(ch->list.next, typeof(*req), node);
-	if (req) {
-		int bytes_transferred;
 
-		bytes_transferred = ch->req_transfer_count;
-		bytes_transferred += 1;
-		bytes_transferred <<= 2;
+	if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
+		bool is_dma_ping_complete;
+		unsigned long status = readl(ch->addr + APB_DMA_CHAN_STA);
+		is_dma_ping_complete = (status & STA_PING_PONG) ? true : false;
 
-		list_del(&req->node);
-		req->bytes_transferred = bytes_transferred;
+		/* Ping-pong status reads in reverse if it is a memory write */
+		if (req->to_memory)
+			is_dma_ping_complete = !is_dma_ping_complete;
+
+		/* Out of sync - release the current buffer */
+		if (!is_dma_ping_complete) {
+			/*
+			 * We should not land here if the queueing mechanism
+			 * and system latencies are properly configured.
+			 */
+			req->bytes_transferred += req->size;
+
+			list_del(&req->node);
+			ch->callback = req->complete;
+			ch->cb_req = req;
+
+			tegra_dma_abort_req(ch, req,
+				"DMA is out of sync for ping-pong buffer");
+			return;
+		}
+
+		/*
+		 * Configure the next request so that, after the full buffer
+		 * transfer, it can start without SW intervention.
+		 */
+		configure_next_req(ch, req);
+
+		req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
 		req->status = TEGRA_DMA_REQ_SUCCESS;
+		req->bytes_transferred += req->size >> 1;
 
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		/* Callback should be called without any lock */
-		pr_debug("%s: transferred %d bytes\n", __func__,
-			req->bytes_transferred);
-		req->complete(req);
-		spin_lock_irqsave(&ch->lock, irq_flags);
+		ch->callback = req->threshold;
+		ch->cb_req = req;
+		return;
 	}
 
-	if (!list_empty(&ch->list)) {
-		req = list_entry(ch->list.next, typeof(*req), node);
-		/* the complete function we just called may have enqueued
-		   another req, in which case dma has already started */
-		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
-			tegra_dma_update_hw(ch, req);
+	if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
+		/* Interrupt for full buffer complete */
+		req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+		req->bytes_transferred += req->size >> 1;
+		req->status = TEGRA_DMA_REQ_SUCCESS;
+
+		list_del(&req->node);
+		ch->callback = req->complete;
+		ch->cb_req = req;
+
+		handle_continuous_head_request(ch, req);
+		return;
 	}
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
+	tegra_dma_abort_req(ch, req, "DMA status is out of sync\n");
+	/* The DMA should have been stopped much earlier */
+	BUG();
+	return;
 }
 
-static void handle_continuous_dma(struct tegra_dma_channel *ch)
+static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch)
 {
 	struct tegra_dma_req *req;
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(&ch->lock, irq_flags);
-	if (list_empty(&ch->list)) {
-		spin_unlock_irqrestore(&ch->lock, irq_flags);
+	req = list_entry(ch->list.next, typeof(*req), node);
+	if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_FULL) {
+		tegra_dma_stop(ch);
+		pr_err("%s: DMA complete irq without corresponding req\n",
+			__func__);
+		WARN_ON(1);
 		return;
 	}
 
-	req = list_entry(ch->list.next, typeof(*req), node);
-	if (req) {
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
-			bool is_dma_ping_complete;
-			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
-				& STA_PING_PONG) ? true : false;
-			if (req->to_memory)
-				is_dma_ping_complete = !is_dma_ping_complete;
-			/* Out of sync - Release current buffer */
-			if (!is_dma_ping_complete) {
-				int bytes_transferred;
-
-				bytes_transferred = ch->req_transfer_count;
-				bytes_transferred += 1;
-				bytes_transferred <<= 3;
-				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
-				req->bytes_transferred = bytes_transferred;
-				req->status = TEGRA_DMA_REQ_SUCCESS;
-				tegra_dma_stop(ch);
-
-				if (!list_is_last(&req->node, &ch->list)) {
-					struct tegra_dma_req *next_req;
-
-					next_req = list_entry(req->node.next,
-						typeof(*next_req), node);
-					tegra_dma_update_hw(ch, next_req);
-				}
-
-				list_del(&req->node);
-
-				/* DMA lock is NOT held when callbak is called */
-				spin_unlock_irqrestore(&ch->lock, irq_flags);
-				req->complete(req);
-				return;
-			}
-			/* Load the next request into the hardware, if available
-			 * */
-			if (!list_is_last(&req->node, &ch->list)) {
-				struct tegra_dma_req *next_req;
-
-				next_req = list_entry(req->node.next,
-					typeof(*next_req), node);
-				tegra_dma_update_hw_partial(ch, next_req);
-			}
-			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
-			req->status = TEGRA_DMA_REQ_SUCCESS;
-			/* DMA lock is NOT held when callback is called */
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			if (likely(req->threshold))
-				req->threshold(req);
-			return;
-
-		} else if (req->buffer_status ==
-			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
-			/* Callback when the buffer is completely full (i.e on
-			 * the second interrupt */
-			int bytes_transferred;
+	/* Handle the case when the buffer is completely full */
+	req->bytes_transferred += req->size;
+	req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+	req->status = TEGRA_DMA_REQ_SUCCESS;
 
-			bytes_transferred = ch->req_transfer_count;
-			bytes_transferred += 1;
-			bytes_transferred <<= 3;
+	list_del(&req->node);
+	ch->callback = req->complete;
+	ch->cb_req = req;
 
-			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
-			req->bytes_transferred = bytes_transferred;
-			req->status = TEGRA_DMA_REQ_SUCCESS;
-			list_del(&req->node);
+	handle_continuous_head_request(ch, req);
+	return;
+}
 
-			/* DMA lock is NOT held when callbak is called */
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			req->complete(req);
-			return;
+static void handle_dma_isr_locked(struct tegra_dma_channel *ch)
+{
+	/* There should be a proper ISR handler */
+	BUG_ON(!ch->isr_handler);
 
-		} else {
-			BUG();
-		}
+	if (list_empty(&ch->list)) {
+		tegra_dma_stop(ch);
+		pr_err("%s: No requests in the list.\n", __func__);
+		WARN_ON(1);
+		return;
 	}
-	spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+	ch->isr_handler(ch);
 }
 
 static irqreturn_t dma_isr(int irq, void *data)
 {
 	struct tegra_dma_channel *ch = data;
+	unsigned long irq_flags;
 	unsigned long status;
+	dma_callback callback = NULL;
+	struct tegra_dma_req *cb_req = NULL;
+
+	spin_lock_irqsave(&ch->lock, irq_flags);
+
+	/*
+	 * Callbacks should be set and cleared while holding the spinlock,
+	 * never left set.
+	 */
+	if (ch->callback || ch->cb_req)
+		pr_err("%s(): "
+			"Channel %d callbacks are not initialized properly\n",
+			__func__, ch->id);
+	BUG_ON(ch->callback || ch->cb_req);
 
 	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	if (status & STA_ISE_EOC)
+	if (status & STA_ISE_EOC) {
+		/* Clear the DMA interrupt status */
 		writel(status, ch->addr + APB_DMA_CHAN_STA);
-	else {
-		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
-		return IRQ_HANDLED;
+		handle_dma_isr_locked(ch);
+		callback = ch->callback;
+		cb_req = ch->cb_req;
+		ch->callback = NULL;
+		ch->cb_req = NULL;
+	} else {
+		pr_info("Interrupt already handled on channel %d\n", ch->id);
 	}
-	return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t dma_thread_fn(int irq, void *data)
-{
-	struct tegra_dma_channel *ch = data;
-
-	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
-		handle_oneshot_dma(ch);
-	else
-		handle_continuous_dma(ch);
-
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
 
+	/* Call the callback to notify the client, if there is one */
+	if (callback)
+		callback(cb_req);
 	return IRQ_HANDLED;
 }
 
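
The threaded-IRQ pair (dma_isr()/dma_thread_fn()) is collapsed into one hard-IRQ handler: the mode-specific handler stores the request's callback in ch->callback under the channel lock, and dma_isr() captures and clears it before dropping the lock, then invokes it with no locks held. A standalone illustration of that capture-then-call pattern (POSIX threads, not kernel code):

    #include <pthread.h>
    #include <stdio.h>

    typedef void (*cb_t)(int arg);

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static cb_t pending_cb;
    static int pending_arg;

    static void isr(void)
    {
            cb_t cb;
            int arg;

            pthread_mutex_lock(&lock);
            cb = pending_cb;        /* capture under the lock ... */
            arg = pending_arg;
            pending_cb = NULL;      /* ... and clear, never leave it set */
            pthread_mutex_unlock(&lock);

            if (cb)                 /* invoke with no locks held */
                    cb(arg);
    }

    static void done(int arg)
    {
            printf("transfer %d complete\n", arg);
    }

    int main(void)
    {
            pending_cb = done;
            pending_arg = 42;
            isr();
            return 0;
    }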
@@ -679,44 +994,57 @@ int __init tegra_dma_init(void)
 	int ret = 0;
 	int i;
 	unsigned int irq;
-	void __iomem *addr;
-	struct clk *c;
 
 	bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);
 
-	c = clk_get_sys("tegra-dma", NULL);
-	if (IS_ERR(c)) {
+	dma_clk = clk_get_sys("tegra-dma", NULL);
+	if (IS_ERR_OR_NULL(dma_clk)) {
 		pr_err("Unable to get clock for APB DMA\n");
-		ret = PTR_ERR(c);
+		ret = PTR_ERR(dma_clk);
 		goto fail;
 	}
-	ret = clk_enable(c);
+	ret = clk_enable(dma_clk);
 	if (ret != 0) {
 		pr_err("Unable to enable clock for APB DMA\n");
 		goto fail;
 	}
 
-	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
-	writel(GEN_ENABLE, addr + APB_DMA_GEN);
-	writel(0, addr + APB_DMA_CNTRL);
+	/*
+	 * Reset all DMA channels to make sure every channel is in its
+	 * initial state.
+	 */
+	tegra_periph_reset_assert(dma_clk);
+	udelay(10);
+	tegra_periph_reset_deassert(dma_clk);
+	udelay(10);
+
+	writel(GEN_ENABLE, general_dma_addr + APB_DMA_GEN);
+	writel(0, general_dma_addr + APB_DMA_CNTRL);
 	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
-	       addr + APB_DMA_IRQ_MASK_SET);
+	       general_dma_addr + APB_DMA_IRQ_MASK_SET);
 
 	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
 		struct tegra_dma_channel *ch = &dma_channels[i];
 
 		ch->id = i;
+		ch->isr_handler = NULL;
 		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
 
+		memset(ch->client_name, 0, sizeof(ch->client_name));
+
 		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
 			TEGRA_APB_DMA_CH0_SIZE * i);
 
 		spin_lock_init(&ch->lock);
 		INIT_LIST_HEAD(&ch->list);
 
-		irq = INT_APB_DMA_CH0 + i;
-		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
-			dma_channels[i].name, ch);
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+		if (i >= 16)
+			irq = INT_APB_DMA_CH16 + i - 16;
+		else
+#endif
+			irq = INT_APB_DMA_CH0 + i;
+		ret = request_irq(irq, dma_isr, 0, dma_channels[i].name, ch);
 		if (ret) {
 			pr_err("Failed to register IRQ %d for DMA %d\n",
 				irq, i);
@@ -733,7 +1061,7 @@ int __init tegra_dma_init(void)
 
 	return 0;
 fail:
-	writel(0, addr + APB_DMA_GEN);
+	writel(0, general_dma_addr + APB_DMA_GEN);
 	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
 		struct tegra_dma_channel *ch = &dma_channels[i];
 		if (ch->irq)
@@ -743,21 +1071,21 @@ fail:
 }
 postcore_initcall(tegra_dma_init);
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+
 static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
 
-void tegra_dma_suspend(void)
+static int tegra_dma_suspend(void)
 {
-	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
 	u32 *ctx = apb_dma;
 	int i;
 
-	*ctx++ = readl(addr + APB_DMA_GEN);
-	*ctx++ = readl(addr + APB_DMA_CNTRL);
-	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);
+	*ctx++ = readl(general_dma_addr + APB_DMA_GEN);
+	*ctx++ = readl(general_dma_addr + APB_DMA_CNTRL);
+	*ctx++ = readl(general_dma_addr + APB_DMA_IRQ_MASK);
 
 	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
-		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
+		void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
 			TEGRA_APB_DMA_CH0_SIZE * i);
 
 		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
@@ -766,20 +1094,26 @@ void tegra_dma_suspend(void)
 		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
 		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
 	}
+
+	/* Disable the DMA clock */
+	clk_disable(dma_clk);
+	return 0;
 }
 
-void tegra_dma_resume(void)
+static void tegra_dma_resume(void)
 {
-	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
 	u32 *ctx = apb_dma;
 	int i;
 
-	writel(*ctx++, addr + APB_DMA_GEN);
-	writel(*ctx++, addr + APB_DMA_CNTRL);
-	writel(*ctx++, addr + APB_DMA_IRQ_MASK);
+	/* Enable the DMA clock */
+	clk_enable(dma_clk);
+
+	writel(*ctx++, general_dma_addr + APB_DMA_GEN);
+	writel(*ctx++, general_dma_addr + APB_DMA_CNTRL);
+	writel(*ctx++, general_dma_addr + APB_DMA_IRQ_MASK);
 
 	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
-		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
+		void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
 			TEGRA_APB_DMA_CH0_SIZE * i);
 
 		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
@@ -790,4 +1124,80 @@ void tegra_dma_resume(void)
 	}
 }
 
+static struct syscore_ops tegra_dma_syscore_ops = {
+	.suspend = tegra_dma_suspend,
+	.resume = tegra_dma_resume,
+};
+
+static int tegra_dma_syscore_init(void)
+{
+	register_syscore_ops(&tegra_dma_syscore_ops);
+
+	return 0;
+}
+subsys_initcall(tegra_dma_syscore_init);
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static int dbg_dma_show(struct seq_file *s, void *unused)
+{
+	int i;
+
+	seq_printf(s, " APBDMA global register\n");
+	seq_printf(s, "DMA_GEN:   0x%08x\n",
+		__raw_readl(general_dma_addr + APB_DMA_GEN));
+	seq_printf(s, "DMA_CNTRL: 0x%08x\n",
+		__raw_readl(general_dma_addr + APB_DMA_CNTRL));
+	seq_printf(s, "IRQ_MASK:  0x%08x\n",
+		__raw_readl(general_dma_addr + APB_DMA_IRQ_MASK));
+
+	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
+		void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
+			TEGRA_APB_DMA_CH0_SIZE * i);
+
+		seq_printf(s, " APBDMA channel %02d register\n", i);
+		seq_printf(s, "0x00: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			__raw_readl(addr + 0x0),
+			__raw_readl(addr + 0x4),
+			__raw_readl(addr + 0x8),
+			__raw_readl(addr + 0xC));
+		seq_printf(s, "0x10: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			__raw_readl(addr + 0x10),
+			__raw_readl(addr + 0x14),
+			__raw_readl(addr + 0x18),
+			__raw_readl(addr + 0x1C));
+	}
+	seq_printf(s, "\nAPB DMA users\n");
+	seq_printf(s, "-------------\n");
+	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
+		struct tegra_dma_channel *ch = &dma_channels[i];
+		if (strlen(ch->client_name) > 0)
+			seq_printf(s, "dma %d -> %s\n", i, ch->client_name);
+	}
+	return 0;
+}
+
+static int dbg_dma_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dbg_dma_show, &inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+	.open		= dbg_dma_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init tegra_dma_debuginit(void)
+{
+	(void) debugfs_create_file("tegra_dma", S_IRUGO,
+		NULL, NULL, &debug_fops);
+	return 0;
+}
+late_initcall(tegra_dma_debuginit);
 #endif
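
With CONFIG_DEBUG_FS enabled, the register dump and per-channel client table added above appear as a read-only tegra_dma file in the debugfs root, typically readable at /sys/kernel/debug/tegra_dma once debugfs is mounted.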