 arch/arm/mach-tegra/dma.c | 197
 1 file changed, 108 insertions(+), 89 deletions(-)
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index a2a252db024b..250bc7baa00a 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -121,17 +121,13 @@ struct tegra_dma_channel {
 	void __iomem *addr;
 	int mode;
 	int irq;
-
-	/* Register shadow */
-	u32 csr;
-	u32 ahb_seq;
-	u32 ahb_ptr;
-	u32 apb_seq;
-	u32 apb_ptr;
+	int req_transfer_count;
 };
 
 #define NV_DMA_MAX_CHANNELS 32
 
+static DEFINE_MUTEX(tegra_dma_lock);
+
 static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
 static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
 
@@ -139,7 +135,6 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req);
 static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req);
-static void tegra_dma_init_hw(struct tegra_dma_channel *ch);
 static void tegra_dma_stop(struct tegra_dma_channel *ch);
 
 void tegra_dma_flush(struct tegra_dma_channel *ch)
@@ -151,6 +146,9 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch)
 {
 	struct tegra_dma_req *req;
 
+	if (tegra_dma_is_empty(ch))
+		return;
+
 	req = list_entry(ch->list.next, typeof(*req), node);
 
 	tegra_dma_dequeue_req(ch, req);
@@ -159,10 +157,10 @@ void tegra_dma_dequeue(struct tegra_dma_channel *ch)
 
 void tegra_dma_stop(struct tegra_dma_channel *ch)
 {
-	unsigned int csr;
-	unsigned int status;
+	u32 csr;
+	u32 status;
 
-	csr = ch->csr;
+	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
 	csr &= ~CSR_IE_EOC;
 	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
 
@@ -176,19 +174,16 @@ void tegra_dma_stop(struct tegra_dma_channel *ch)
 
 int tegra_dma_cancel(struct tegra_dma_channel *ch)
 {
-	unsigned int csr;
+	u32 csr;
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	while (!list_empty(&ch->list))
 		list_del(ch->list.next);
 
-	csr = ch->csr;
+	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
 	csr &= ~CSR_REQ_SEL_MASK;
 	csr |= CSR_REQ_SEL_INVALID;
-
-	/* Set the enable as that is not shadowed */
-	csr |= CSR_ENB;
 	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
 
 	tegra_dma_stop(ch);
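The cancel path above no longer patches a cached shadow of CSR; it reads the live register, rewrites the request-select field, and writes it back. A minimal userspace sketch of that read-modify-write, with the mask, shift, and "invalid" selector values invented purely for illustration (the real constants come from the Tegra APB DMA register definitions, not this snippet):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only -- the driver's CSR_REQ_SEL_* constants
 * are defined in its own headers and are not reproduced here. */
#define CSR_REQ_SEL_SHIFT   16
#define CSR_REQ_SEL_MASK    (0x1fu << CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID (31u << CSR_REQ_SEL_SHIFT)

/* Read-modify-write of the request-select field, mirroring the pattern
 * the patch uses in tegra_dma_cancel() and tegra_dma_dequeue_req(). */
static uint32_t csr_invalidate_req_sel(uint32_t csr)
{
	csr &= ~CSR_REQ_SEL_MASK;   /* drop the old request selector */
	csr |= CSR_REQ_SEL_INVALID; /* route the channel to an unused selector */
	return csr;
}

int main(void)
{
	uint32_t csr = 0x90000000u | (4u << CSR_REQ_SEL_SHIFT); /* pretend readl() result */

	printf("before: %08x\n", (unsigned)csr);
	printf("after:  %08x\n", (unsigned)csr_invalidate_req_sel(csr));
	return 0;
}
```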
@@ -230,18 +225,15 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 	 * - Finally stop or program the DMA to the next buffer in the
 	 *   list.
 	 */
-	csr = ch->csr;
+	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
 	csr &= ~CSR_REQ_SEL_MASK;
 	csr |= CSR_REQ_SEL_INVALID;
-
-	/* Set the enable as that is not shadowed */
-	csr |= CSR_ENB;
 	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
 
 	/* Get the transfer count */
 	status = readl(ch->addr + APB_DMA_CHAN_STA);
 	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
-	req_transfer_count = (ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+	req_transfer_count = ch->req_transfer_count;
 	req_transfer_count += 1;
 	to_transfer += 1;
 
@@ -349,7 +341,9 @@ EXPORT_SYMBOL(tegra_dma_enqueue_req);
 struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
 {
 	int channel;
-	struct tegra_dma_channel *ch;
+	struct tegra_dma_channel *ch = NULL;
+
+	mutex_lock(&tegra_dma_lock);
 
 	/* first channel is the shared channel */
 	if (mode & TEGRA_DMA_SHARED) {
@@ -358,11 +352,14 @@ struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
 		channel = find_first_zero_bit(channel_usage,
 			ARRAY_SIZE(dma_channels));
 		if (channel >= ARRAY_SIZE(dma_channels))
-			return NULL;
+			goto out;
 	}
 	__set_bit(channel, channel_usage);
 	ch = &dma_channels[channel];
 	ch->mode = mode;
+
+out:
+	mutex_unlock(&tegra_dma_lock);
 	return ch;
 }
 EXPORT_SYMBOL(tegra_dma_allocate_channel);
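The two hunks above put channel allocation behind the new tegra_dma_lock so concurrent callers cannot race on the channel_usage bitmap. A rough userspace analogue of that allocate/free pairing, with a pthread mutex standing in for the kernel mutex and open-coded bit operations standing in for find_first_zero_bit()/__set_bit() (all names below are stand-ins, not the driver's API):

```c
#include <pthread.h>
#include <stdio.h>

#define MAX_CHANNELS 32

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for tegra_dma_lock */
static unsigned long channel_usage;                           /* stands in for the kernel bitmap */

/* Claim the first free channel index, or -1 if all are busy. */
static int allocate_channel(void)
{
	int id = -1;
	int i;

	pthread_mutex_lock(&chan_lock);
	for (i = 0; i < MAX_CHANNELS; i++) {
		if (!(channel_usage & (1UL << i))) {
			channel_usage |= 1UL << i;  /* __set_bit() equivalent */
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&chan_lock);
	return id;
}

static void free_channel(int id)
{
	pthread_mutex_lock(&chan_lock);
	channel_usage &= ~(1UL << id);              /* __clear_bit() equivalent */
	pthread_mutex_unlock(&chan_lock);
}

int main(void)
{
	int a = allocate_channel();
	int b = allocate_channel();

	printf("allocated %d and %d\n", a, b);
	free_channel(a);
	free_channel(b);
	return 0;
}
```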
@@ -372,22 +369,27 @@ void tegra_dma_free_channel(struct tegra_dma_channel *ch)
 	if (ch->mode & TEGRA_DMA_SHARED)
 		return;
 	tegra_dma_cancel(ch);
+	mutex_lock(&tegra_dma_lock);
 	__clear_bit(ch->id, channel_usage);
+	mutex_unlock(&tegra_dma_lock);
 }
 EXPORT_SYMBOL(tegra_dma_free_channel);
 
 static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *req)
 {
+	u32 apb_ptr;
+	u32 ahb_ptr;
+
 	if (req->to_memory) {
-		ch->apb_ptr = req->source_addr;
-		ch->ahb_ptr = req->dest_addr;
+		apb_ptr = req->source_addr;
+		ahb_ptr = req->dest_addr;
 	} else {
-		ch->apb_ptr = req->dest_addr;
-		ch->ahb_ptr = req->source_addr;
+		apb_ptr = req->dest_addr;
+		ahb_ptr = req->source_addr;
 	}
-	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
-	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
 
 	req->status = TEGRA_DMA_REQ_INFLIGHT;
 	return;
@@ -401,38 +403,39 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 	int ahb_bus_width;
 	int apb_bus_width;
 	int index;
-	unsigned long csr;
 
+	u32 ahb_seq;
+	u32 apb_seq;
+	u32 ahb_ptr;
+	u32 apb_ptr;
+	u32 csr;
+
+	csr = CSR_IE_EOC | CSR_FLOW;
+	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
+	apb_seq = 0;
 
-	ch->csr |= CSR_FLOW;
-	ch->csr &= ~CSR_REQ_SEL_MASK;
-	ch->csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
-	ch->ahb_seq &= ~AHB_SEQ_BURST_MASK;
-	ch->ahb_seq |= AHB_SEQ_BURST_1;
+	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;
 
 	/* One shot mode is always single buffered,
 	 * continuous mode is always double buffered
 	 * */
 	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
-		ch->csr |= CSR_ONCE;
-		ch->ahb_seq &= ~AHB_SEQ_DBL_BUF;
-		ch->csr &= ~CSR_WCOUNT_MASK;
-		ch->csr |= ((req->size>>2) - 1) << CSR_WCOUNT_SHIFT;
+		csr |= CSR_ONCE;
+		ch->req_transfer_count = (req->size >> 2) - 1;
 	} else {
-		ch->csr &= ~CSR_ONCE;
-		ch->ahb_seq |= AHB_SEQ_DBL_BUF;
+		ahb_seq |= AHB_SEQ_DBL_BUF;
 
 		/* In double buffered mode, we set the size to half the
 		 * requested size and interrupt when half the buffer
 		 * is full */
-		ch->csr &= ~CSR_WCOUNT_MASK;
-		ch->csr |= ((req->size>>3) - 1) << CSR_WCOUNT_SHIFT;
+		ch->req_transfer_count = (req->size >> 3) - 1;
 	}
 
+	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
+
 	if (req->to_memory) {
-		ch->csr &= ~CSR_DIR;
-		ch->apb_ptr = req->source_addr;
-		ch->ahb_ptr = req->dest_addr;
+		apb_ptr = req->source_addr;
+		ahb_ptr = req->dest_addr;
 
 		apb_addr_wrap = req->source_wrap;
 		ahb_addr_wrap = req->dest_wrap;
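The hunk above derives the CSR word count per request instead of keeping it in a shadow register: WCOUNT is encoded as "number of 32-bit words minus one", a one-shot transfer programs the whole buffer ((size >> 2) - 1), and a continuous double-buffered transfer programs half of it ((size >> 3) - 1) so an interrupt fires at each half. A small sketch of that arithmetic, with the 4096-byte request size chosen only for illustration:

```c
#include <stdio.h>

/* WCOUNT is "32-bit words minus one", as programmed in tegra_dma_update_hw(). */
static int oneshot_wcount(int size_bytes)
{
	return (size_bytes >> 2) - 1;  /* whole buffer, in words */
}

static int continuous_wcount(int size_bytes)
{
	return (size_bytes >> 3) - 1;  /* half the buffer per interrupt */
}

/* The completion handlers undo the encoding: (count + 1) words,
 * times 4 bytes (<< 2) for one-shot, or times 8 (<< 3) for a
 * completed double-buffered request (both halves). */
int main(void)
{
	int size = 4096;               /* example request size in bytes */
	int once = oneshot_wcount(size);
	int cont = continuous_wcount(size);

	printf("one-shot   WCOUNT=%d -> %d bytes\n", once, (once + 1) << 2);
	printf("continuous WCOUNT=%d -> %d bytes\n", cont, (cont + 1) << 3);
	return 0;
}
```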
@@ -440,9 +443,9 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 		ahb_bus_width = req->dest_bus_width;
 
 	} else {
-		ch->csr |= CSR_DIR;
-		ch->apb_ptr = req->dest_addr;
-		ch->ahb_ptr = req->source_addr;
+		csr |= CSR_DIR;
+		apb_ptr = req->dest_addr;
+		ahb_ptr = req->source_addr;
 
 		apb_addr_wrap = req->dest_wrap;
 		ahb_addr_wrap = req->source_wrap;
@@ -461,8 +464,7 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 		index++;
 	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
 	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
-	ch->apb_seq &= ~APB_SEQ_WRAP_MASK;
-	ch->apb_seq |= index << APB_SEQ_WRAP_SHIFT;
+	apb_seq |= index << APB_SEQ_WRAP_SHIFT;
 
 	/* set address wrap for AHB size */
 	index = 0;
@@ -472,55 +474,42 @@ static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
 		index++;
 	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
 	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
-	ch->ahb_seq &= ~AHB_SEQ_WRAP_MASK;
-	ch->ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
+	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;
 
 	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
 		if (bus_width_table[index] == ahb_bus_width)
 			break;
 	}
 	BUG_ON(index == ARRAY_SIZE(bus_width_table));
-	ch->ahb_seq &= ~AHB_SEQ_BUS_WIDTH_MASK;
-	ch->ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
+	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;
 
 	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
 		if (bus_width_table[index] == apb_bus_width)
 			break;
 	}
 	BUG_ON(index == ARRAY_SIZE(bus_width_table));
-	ch->apb_seq &= ~APB_SEQ_BUS_WIDTH_MASK;
-	ch->apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
-
-	ch->csr |= CSR_IE_EOC;
+	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;
 
-	/* update hw registers with the shadow */
-	writel(ch->csr, ch->addr + APB_DMA_CHAN_CSR);
-	writel(ch->apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
-	writel(ch->apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
-	writel(ch->ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
-	writel(ch->ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
+	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
+	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
+	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
+	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
+	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);
 
-	csr = ch->csr | CSR_ENB;
+	csr |= CSR_ENB;
 	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
 
 	req->status = TEGRA_DMA_REQ_INFLIGHT;
 }
 
-static void tegra_dma_init_hw(struct tegra_dma_channel *ch)
-{
-	/* One shot with an interrupt to CPU after transfer */
-	ch->csr = CSR_ONCE | CSR_IE_EOC;
-	ch->ahb_seq = AHB_SEQ_BUS_WIDTH_32 | AHB_SEQ_INTR_ENB;
-	ch->apb_seq = APB_SEQ_BUS_WIDTH_32 | 1 << APB_SEQ_WRAP_SHIFT;
-}
-
 static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 {
 	struct tegra_dma_req *req;
+	unsigned long irq_flags;
 
-	spin_lock(&ch->lock);
+	spin_lock_irqsave(&ch->lock, irq_flags);
 	if (list_empty(&ch->list)) {
-		spin_unlock(&ch->lock);
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
 		return;
 	}
 
@@ -528,8 +517,7 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 	if (req) {
 		int bytes_transferred;
 
-		bytes_transferred =
-			(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+		bytes_transferred = ch->req_transfer_count;
 		bytes_transferred += 1;
 		bytes_transferred <<= 2;
 
@@ -537,12 +525,12 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 		req->bytes_transferred = bytes_transferred;
 		req->status = TEGRA_DMA_REQ_SUCCESS;
 
-		spin_unlock(&ch->lock);
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
 		/* Callback should be called without any lock */
 		pr_debug("%s: transferred %d bytes\n", __func__,
 			req->bytes_transferred);
 		req->complete(req);
-		spin_lock(&ch->lock);
+		spin_lock_irqsave(&ch->lock, irq_flags);
 	}
 
 	if (!list_empty(&ch->list)) {
@@ -552,22 +540,55 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
 			tegra_dma_update_hw(ch, req);
 	}
-	spin_unlock(&ch->lock);
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
 }
 
 static void handle_continuous_dma(struct tegra_dma_channel *ch)
 {
 	struct tegra_dma_req *req;
+	unsigned long irq_flags;
 
-	spin_lock(&ch->lock);
+	spin_lock_irqsave(&ch->lock, irq_flags);
 	if (list_empty(&ch->list)) {
-		spin_unlock(&ch->lock);
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
 		return;
 	}
 
 	req = list_entry(ch->list.next, typeof(*req), node);
 	if (req) {
 		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
+			bool is_dma_ping_complete;
+			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
+						& STA_PING_PONG) ? true : false;
+			if (req->to_memory)
+				is_dma_ping_complete = !is_dma_ping_complete;
+			/* Out of sync - Release current buffer */
+			if (!is_dma_ping_complete) {
+				int bytes_transferred;
+
+				bytes_transferred = ch->req_transfer_count;
+				bytes_transferred += 1;
+				bytes_transferred <<= 3;
+				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
+				req->bytes_transferred = bytes_transferred;
+				req->status = TEGRA_DMA_REQ_SUCCESS;
+				tegra_dma_stop(ch);
+
+				if (!list_is_last(&req->node, &ch->list)) {
+					struct tegra_dma_req *next_req;
+
+					next_req = list_entry(req->node.next,
+						typeof(*next_req), node);
+					tegra_dma_update_hw(ch, next_req);
+				}
+
+				list_del(&req->node);
+
+				/* DMA lock is NOT held when callback is called */
+				spin_unlock_irqrestore(&ch->lock, irq_flags);
+				req->complete(req);
+				return;
+			}
 			/* Load the next request into the hardware, if available
 			 * */
 			if (!list_is_last(&req->node, &ch->list)) {
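The block added above decides whether the interrupt handler has fallen behind the hardware: it samples the STA_PING_PONG status bit, inverts the sense for memory-bound transfers, and treats "ping half not complete" as out of sync, in which case the whole request is marked full, completed, and the next queued request is programmed. A standalone sketch of just that decision, with the status bit passed in as a plain flag rather than read from the hardware:

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirror of the "is the ping half really done?" test added above:
 * the raw STA_PING_PONG bit is inverted for to-memory transfers. */
static bool ping_half_complete(bool sta_ping_pong_bit, bool to_memory)
{
	bool done = sta_ping_pong_bit;

	if (to_memory)
		done = !done;
	return done;  /* false => out of sync, release the whole buffer */
}

int main(void)
{
	int bit, mem;

	for (bit = 0; bit <= 1; bit++)
		for (mem = 0; mem <= 1; mem++)
			printf("STA bit=%d to_memory=%d -> ping complete=%d\n",
			       bit, mem, ping_half_complete(bit, mem));
	return 0;
}
```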
@@ -580,7 +601,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
 			req->status = TEGRA_DMA_REQ_SUCCESS;
 			/* DMA lock is NOT held when callback is called */
-			spin_unlock(&ch->lock);
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			if (likely(req->threshold))
 				req->threshold(req);
 			return;
@@ -591,8 +612,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			 * the second interrupt */
 			int bytes_transferred;
 
-			bytes_transferred =
-				(ch->csr & CSR_WCOUNT_MASK) >> CSR_WCOUNT_SHIFT;
+			bytes_transferred = ch->req_transfer_count;
 			bytes_transferred += 1;
 			bytes_transferred <<= 3;
 
@@ -602,7 +622,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			list_del(&req->node);
 
 			/* DMA lock is NOT held when callback is called */
-			spin_unlock(&ch->lock);
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			req->complete(req);
 			return;
 
@@ -610,7 +630,7 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			BUG();
 		}
 	}
-	spin_unlock(&ch->lock);
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
 }
 
 static irqreturn_t dma_isr(int irq, void *data)
@@ -674,7 +694,6 @@ int __init tegra_dma_init(void)
 
 		spin_lock_init(&ch->lock);
 		INIT_LIST_HEAD(&ch->list);
-		tegra_dma_init_hw(ch);
 
 		irq = INT_APB_DMA_CH0 + i;
 		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,