Diffstat (limited to 'arch/arm/mach-tegra/dma.c')
-rw-r--r--	arch/arm/mach-tegra/dma.c | 243
1 file changed, 142 insertions(+), 101 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index edda6ec5e925..e945ae28ee77 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -27,9 +27,11 @@
 #include <linux/err.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
+#include <linux/clk.h>
 #include <mach/dma.h>
 #include <mach/irqs.h>
 #include <mach/iomap.h>
+#include <mach/suspend.h>
 
 #define APB_DMA_GEN 0x000
 #define GEN_ENABLE (1<<31)
@@ -120,17 +122,14 @@ struct tegra_dma_channel {
 	void __iomem *addr;
 	int mode;
 	int irq;
-
-	/* Register shadow */
-	u32 csr;
-	u32 ahb_seq;
-	u32 ahb_ptr;
-	u32 apb_seq;
-	u32 apb_ptr;
+	int req_transfer_count;
 };
 
 #define NV_DMA_MAX_CHANNELS 32
 
+static bool tegra_dma_initialized;
+static DEFINE_MUTEX(tegra_dma_lock);
+
 static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
 static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
 
@@ -138,7 +137,6 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req);
 static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req);
-static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
 static void tegra_dma_stop(struct tegra_dma_channel *ch);
 
 void tegra_dma_flush(struct tegra_dma_channel *ch)
@@ -150,6 +148,9 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch)
 {
 	struct tegra_dma_req *req;
 
+	if (tegra_dma_is_empty(ch))
+		return;
+
 	req = list_entry(ch->list.next, typeof(*req), node);
 
 	tegra_dma_dequeue_req(ch, req);
@@ -158,10 +159,10 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch)
 
 void tegra_dma_stop(struct tegra_dma_channel *ch)
 {
-	unsigned int csr;
-	unsigned int status;
+	u32 csr;
+	u32 status;
 
-	csr = ch->csr;
+	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
 	csr &= ~CSR_IE_EOC;
 	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
 
@@ -175,19 +176,16 @@ void tegra_dma_stop(struct tegra_dma_channel *ch)
 
 int tegra_dma_cancel(struct tegra_dma_channel *ch)
 {
-	unsigned int csr;
+	u32 csr;
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	while (!list_empty(&ch->list))
 		list_del(ch->list.next);
 
-	csr = ch->csr;
+	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
 	csr &= ~CSR_REQ_SEL_MASK;
 	csr |= CSR_REQ_SEL_INVALID;
-
-	/* Set the enable as that is not shadowed */
-	csr |= CSR_ENB;
 	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
 
 	tegra_dma_stop(ch);
@@ -229,18 +227,15 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 	 * - Finally stop or program the DMA to the next buffer in the
 	 *   list.
 	 */
-	csr = ch->csr;
+	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
 	csr &= ~CSR_REQ_SEL_MASK;
 	csr |= CSR_REQ_SEL_INVALID;
-
-	/* Set the enable as that is not shadowed */
-	csr |= CSR_ENB;
 	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
 
 	/* Get the transfer count */
 	status = readl(ch->addr + APB_DMA_CHAN_STA);
 	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
-	req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+	req_transfer_count = ch->req_transfer_count;
 	req_transfer_count += 1;
 	to_transfer += 1;
 
@@ -318,6 +313,7 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req)
 {
 	unsigned long irq_flags;
+	struct tegra_dma_req *_req;
 	int start_dma = 0;
 
 	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
@@ -328,6 +324,13 @@ int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 
+	list_for_each_entry(_req, &ch->list, node) {
+		if (req == _req) {
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+			return -EEXIST;
+		}
+	}
+
 	req->bytes_transferred = 0;
 	req->status = 0;
 	req->buffer_status = 0;
@@ -348,7 +351,12 @@ EXPORT_SYMBOL(tegra_dma_enqueue_req);
 struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
 {
 	int channel;
-	struct tegra_dma_channel *ch;
+	struct tegra_dma_channel *ch = NULL;
+
+	if (WARN_ON(!tegra_dma_initialized))
+		return NULL;
+
+	mutex_lock(&tegra_dma_lock);
 
 	/* first channel is the shared channel */
 	if (mode & TEGRA_DMA_SHARED) {
@@ -357,11 +365,14 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
 		channel = find_first_zero_bit(channel_usage,
 			ARRAY_SIZE(dma_channels));
 		if (channel >= ARRAY_SIZE(dma_channels))
-			return NULL;
+			goto out;
 	}
 	__set_bit(channel, channel_usage);
 	ch = &dma_channels[channel];
 	ch->mode = mode;
+
+out:
+	mutex_unlock(&tegra_dma_lock);
 	return ch;
 }
 EXPORT_SYMBOL(tegra_dma_allocate_channel);
@@ -371,22 +382,27 @@ void tegra_dma_free_channel(struct tegra_dma_channel *ch)
 	if (ch->mode & TEGRA_DMA_SHARED)
 		return;
 	tegra_dma_cancel(ch);
+	mutex_lock(&tegra_dma_lock);
 	__clear_bit(ch->id, channel_usage);
+	mutex_unlock(&tegra_dma_lock);
 }
 EXPORT_SYMBOL(tegra_dma_free_channel);
 
 static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req)
 {
+	u32 apb_ptr;
+	u32 ahb_ptr;
+
 	if (req->to_memory) {
-		ch->apb_ptr = req->source_addr;
-		ch->ahb_ptr = req->dest_addr;
+		apb_ptr = req->source_addr;
+		ahb_ptr = req->dest_addr;
 	} else {
-		ch->apb_ptr = req->dest_addr;
-		ch->ahb_ptr = req->source_addr;
+		apb_ptr = req->dest_addr;
+		ahb_ptr = req->source_addr;
 	}
-	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
-	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
 
 	req->status = TEGRA_DMA_REQ_INFLIGHT;
 	return;
@@ -400,38 +416,39 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 	int ahb_bus_width;
 	int apb_bus_width;
 	int index;
-	unsigned long csr;
 
+	u32 ahb_seq;
+	u32 apb_seq;
+	u32 ahb_ptr;
+	u32 apb_ptr;
+	u32 csr;
+
+	csr = CSR_IE_EOC | CSR_FLOW;
+	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
+	apb_seq = 0;
 
-	ch->csr |= CSR_FLOW;
-	ch->csr &= ~CSR_REQ_SEL_MASK;
-	ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
-	ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
-	ch->ahb_seq |= AHB_SEQ_BURST_1;
+	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
 
 	/* One shot mode is always single buffered,
 	 * continuous mode is always double buffered
 	 * */
 	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
-		ch->csr |= CSR_ONCE;
-		ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
-		ch->csr &= ~CSR_WCOUNT_MASK;
-		ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
+		csr |= CSR_ONCE;
+		ch->req_transfer_count = (req->size >> 2) - 1;
 	} else {
-		ch->csr &= ~CSR_ONCE;
-		ch->ahb_seq |= AHB_SEQ_DBL_BUF;
+		ahb_seq |= AHB_SEQ_DBL_BUF;
 
 		/* In double buffered mode, we set the size to half the
 		 * requested size and interrupt when half the buffer
 		 * is full */
-		ch->csr &= ~CSR_WCOUNT_MASK;
-		ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
+		ch->req_transfer_count = (req->size >> 3) - 1;
 	}
 
+	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
+
 	if (req->to_memory) {
-		ch->csr &= ~CSR_DIR;
-		ch->apb_ptr = req->source_addr;
-		ch->ahb_ptr = req->dest_addr;
+		apb_ptr = req->source_addr;
+		ahb_ptr = req->dest_addr;
 
 		apb_addr_wrap = req->source_wrap;
 		ahb_addr_wrap = req->dest_wrap;
@@ -439,9 +456,9 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 		ahb_bus_width = req->dest_bus_width;
 
 	} else {
-		ch->csr |= CSR_DIR;
-		ch->apb_ptr = req->dest_addr;
-		ch->ahb_ptr = req->source_addr;
+		csr |= CSR_DIR;
+		apb_ptr = req->dest_addr;
+		ahb_ptr = req->source_addr;
 
 		apb_addr_wrap = req->dest_wrap;
 		ahb_addr_wrap = req->source_wrap;
@@ -460,8 +477,7 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 		index++;
 	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
 	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
-	ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
-	ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;
+	apb_seq |= index << APB_SEQ_WRAP_SHIFT;
 
 	/* set address wrap for AHB size */
 	index = 0;
@@ -471,55 +487,42 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 		index++;
 	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
 	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
-	ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
-	ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
+	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
 
 	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
 		if (bus_width_table[index] == ahb_bus_width)
 			break;
 	}
 	BUG_ON(index == ARRAY_SIZE(bus_width_table));
-	ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
-	ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
+	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
 
 	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
 		if (bus_width_table[index] == apb_bus_width)
 			break;
 	}
 	BUG_ON(index == ARRAY_SIZE(bus_width_table));
-	ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
-	ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
-
-	ch->csr |= CSR_IE_EOC;
+	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
 
-	/* update hw registers with the shadow */
-	writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
-	writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
-	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
-	writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
-	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
+	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
+	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
 
-	csr = ch->csr | CSR_ENB;
+	csr |= CSR_ENB;
 	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
 
 	req->status = TEGRA_DMA_REQ_INFLIGHT;
 }
 
-static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
-{
-	/* One shot with an interrupt to CPU after transfer */
-	ch->csr = CSR_ONCE | CSR_IE_EOC;
-	ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
-	ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT;
-}
-
 static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 {
 	struct tegra_dma_req *req;
+	unsigned long irq_flags;
 
-	spin_lock(&ch->lock);
+	spin_lock_irqsave(&ch->lock, irq_flags);
 	if (list_empty(&ch->list)) {
-		spin_unlock(&ch->lock);
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
 		return;
 	}
 
@@ -527,8 +530,7 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 	if (req) {
 		int bytes_transferred;
 
-		bytes_transferred =
-			(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+		bytes_transferred = ch->req_transfer_count;
 		bytes_transferred += 1;
 		bytes_transferred <<= 2;
 
@@ -536,12 +538,12 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 		req->bytes_transferred = bytes_transferred;
 		req->status = TEGRA_DMA_REQ_SUCCESS;
 
-		spin_unlock(&ch->lock);
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
 		/* Callback should be called without any lock */
 		pr_debug("%s: transferred %d bytes\n", __func__,
 			req->bytes_transferred);
 		req->complete(req);
-		spin_lock(&ch->lock);
+		spin_lock_irqsave(&ch->lock, irq_flags);
 	}
 
 	if (!list_empty(&ch->list)) {
@@ -551,22 +553,55 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
 			tegra_dma_update_hw(ch, req);
 	}
-	spin_unlock(&ch->lock);
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
 }
 
 static void handle_continuous_dma(struct tegra_dma_channel *ch)
 {
 	struct tegra_dma_req *req;
+	unsigned long irq_flags;
 
-	spin_lock(&ch->lock);
+	spin_lock_irqsave(&ch->lock, irq_flags);
 	if (list_empty(&ch->list)) {
-		spin_unlock(&ch->lock);
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
 		return;
 	}
 
 	req = list_entry(ch->list.next, typeof(*req), node);
 	if (req) {
 		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
+			bool is_dma_ping_complete;
+			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
+				& STA_PING_PONG) ? true : false;
+			if (req->to_memory)
+				is_dma_ping_complete = !is_dma_ping_complete;
+			/* Out of sync - Release current buffer */
+			if (!is_dma_ping_complete) {
+				int bytes_transferred;
+
+				bytes_transferred = ch->req_transfer_count;
+				bytes_transferred += 1;
+				bytes_transferred <<= 3;
+				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+				req->bytes_transferred = bytes_transferred;
+				req->status = TEGRA_DMA_REQ_SUCCESS;
+				tegra_dma_stop(ch);
+
+				if (!list_is_last(&req->node, &ch->list)) {
+					struct tegra_dma_req *next_req;
+
+					next_req = list_entry(req->node.next,
+						typeof(*next_req), node);
+					tegra_dma_update_hw(ch, next_req);
+				}
+
+				list_del(&req->node);
+
+				/* DMA lock is NOT held when callbak is called */
+				spin_unlock_irqrestore(&ch->lock, irq_flags);
+				req->complete(req);
+				return;
+			}
 			/* Load the next request into the hardware, if available
 			 * */
 			if (!list_is_last(&req->node, &ch->list)) {
@@ -579,7 +614,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
 			req->status = TEGRA_DMA_REQ_SUCCESS;
 			/* DMA lock is NOT held when callback is called */
-			spin_unlock(&ch->lock);
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			if (likely(req->threshold))
 				req->threshold(req);
 			return;
@@ -590,8 +625,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			 * the second interrupt */
 			int bytes_transferred;
 
-			bytes_transferred =
-				(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+			bytes_transferred = ch->req_transfer_count;
 			bytes_transferred += 1;
 			bytes_transferred <<= 3;
 
@@ -601,7 +635,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			list_del(&req->node);
 
 			/* DMA lock is NOT held when callbak is called */
-			spin_unlock(&ch->lock);
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			req->complete(req);
 			return;
 
@@ -609,7 +643,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			BUG();
 		}
 	}
-	spin_unlock(&ch->lock);
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
 }
 
 static irqreturn_t dma_isr(int irq, void *data)
@@ -646,6 +680,21 @@ int __init tegra_dma_init(void)
 	int i;
 	unsigned int irq;
 	void __iomem *addr;
+	struct clk *c;
+
+	bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);
+
+	c = clk_get_sys("tegra-dma", NULL);
+	if (IS_ERR(c)) {
+		pr_err("Unable to get clock for APB DMA\n");
+		ret = PTR_ERR(c);
+		goto fail;
+	}
+	ret = clk_enable(c);
+	if (ret != 0) {
+		pr_err("Unable to enable clock for APB DMA\n");
+		goto fail;
+	}
 
 	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
 	writel(GEN_ENABLE, addr + APB_DMA_GEN);
@@ -653,18 +702,9 @@ int __init tegra_dma_init(void)
 	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
 		addr + APB_DMA_IRQ_MASK_SET);
 
-	memset(channel_usage, 0, sizeof(channel_usage));
-	memset(dma_channels, 0, sizeof(dma_channels));
-
-	/* Reserve all the channels we are not supposed to touch */
-	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
-		__set_bit(i, channel_usage);
-
 	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
 		struct tegra_dma_channel *ch = &dma_channels[i];
 
-		__clear_bit(i, channel_usage);
-
 		ch->id = i;
 		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);
 
@@ -673,7 +713,6 @@ int __init tegra_dma_init(void)
 
 		spin_lock_init(&ch->lock);
 		INIT_LIST_HEAD(&ch->list);
-		tegra_dma_init_hw(ch);
 
 		irq = INT_APB_DMA_CH0 + i;
 		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
@@ -684,14 +723,15 @@ int __init tegra_dma_init(void)
 			goto fail;
 		}
 		ch->irq = irq;
+
+		__clear_bit(i, channel_usage);
 	}
 	/* mark the shared channel allocated */
 	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);
 
-	for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
-		__set_bit(i, channel_usage);
+	tegra_dma_initialized = true;
 
-	return ret;
+	return 0;
 fail:
 	writel(0, addr + APB_DMA_GEN);
 	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
@@ -701,6 +741,7 @@ fail:
 	}
 	return ret;
 }
+postcore_initcall(tegra_dma_init);
 
 #ifdef CONFIG_PM
 static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];