-rw-r--r--   drivers/dma/intel_mid_dma.c       | 267
-rw-r--r--   drivers/dma/intel_mid_dma_regs.h  |  30
-rw-r--r--   include/linux/intel_mid_dma.h     |   3
3 files changed, 250 insertions(+), 50 deletions(-)
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3c4333ee1fb7..2ae1086b9481 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -258,6 +258,7 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
 	/*write registers and en*/
 	iowrite32(first->sar, midc->ch_regs + SAR);
 	iowrite32(first->dar, midc->ch_regs + DAR);
+	iowrite32(first->lli_phys, midc->ch_regs + LLP);
 	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
 	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
 	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
@@ -265,9 +266,9 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
 	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
 		(int)first->sar, (int)first->dar, first->cfg_hi,
 		first->cfg_lo, first->ctl_hi, first->ctl_lo);
+	first->status = DMA_IN_PROGRESS;
 
 	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
-	first->status = DMA_IN_PROGRESS;
 }
 
 /**
@@ -284,20 +285,36 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 {
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	dma_async_tx_callback callback_txd = NULL;
+	struct intel_mid_dma_lli *llitem;
 	void *param_txd = NULL;
 
 	midc->completed = txd->cookie;
 	callback_txd = txd->callback;
 	param_txd = txd->callback_param;
 
-	list_move(&desc->desc_node, &midc->free_list);
-	midc->busy = false;
+	if (desc->lli != NULL) {
+		/*clear the DONE bit of completed LLI in memory*/
+		llitem = desc->lli + desc->current_lli;
+		llitem->ctl_hi &= CLEAR_DONE;
+		if (desc->current_lli < desc->lli_length - 1)
+			(desc->current_lli)++;
+		else
+			desc->current_lli = 0;
+	}
 	spin_unlock_bh(&midc->lock);
 	if (callback_txd) {
 		pr_debug("MDMA: TXD callback set ... calling\n");
 		callback_txd(param_txd);
-		spin_lock_bh(&midc->lock);
-		return;
+	}
+	if (midc->raw_tfr) {
+		desc->status = DMA_SUCCESS;
+		if (desc->lli != NULL) {
+			pci_pool_free(desc->lli_pool, desc->lli,
+							desc->lli_phys);
+			pci_pool_destroy(desc->lli_pool);
+		}
+		list_move(&desc->desc_node, &midc->free_list);
+		midc->busy = false;
 	}
 	spin_lock_bh(&midc->lock);
 
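With linked-list transfers the controller sets a DONE flag (bit 12 of CTL_HI, masked off by CLEAR_DONE) in the in-memory LLI after finishing each block, and software must clear it before the item comes around again in circular mode; the hunk above does exactly that and advances current_lli with wrap-around. Note also that the descriptor is only freed and returned to the free list once midc->raw_tfr indicates a transfer-complete (not merely block-complete) interrupt, which keeps a circular transfer alive. A minimal sketch of the same ring bookkeeping, as a hypothetical helper that is not part of the patch:

static void lli_ring_advance(struct intel_mid_dma_desc *desc)
{
	struct intel_mid_dma_lli *item = desc->lli + desc->current_lli;

	item->ctl_hi &= CLEAR_DONE;	/* clear DONE, bit 12 of CTL_HI */
	/* wrap back to the head once the tail item has completed */
	desc->current_lli = (desc->current_lli + 1) % desc->lli_length;
}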
@@ -318,14 +335,89 @@ static void midc_scan_descriptors(struct middma_device *mid,
 
 	/*tx is complete*/
 	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
-		if (desc->status == DMA_IN_PROGRESS) {
-			desc->status = DMA_SUCCESS;
+		if (desc->status == DMA_IN_PROGRESS)
 			midc_descriptor_complete(midc, desc);
-		}
 	}
 	return;
 }
+/**
+ * midc_lli_fill_sg - Helper function to convert
+ *			SG list to Linked List Items.
+ * @midc: Channel
+ * @desc: DMA descriptor
+ * @sglist: Pointer to SG list
+ * @sglen: SG list length
+ * @flags: DMA transaction flags
+ *
+ * Walk through the SG list and convert the SG list into Linked
+ * List Items (LLI).
+ */
+static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
+				struct intel_mid_dma_desc *desc,
+				struct scatterlist *sglist,
+				unsigned int sglen,
+				unsigned int flags)
+{
+	struct intel_mid_dma_slave *mids;
+	struct scatterlist *sg;
+	dma_addr_t lli_next, sg_phy_addr;
+	struct intel_mid_dma_lli *lli_bloc_desc;
+	union intel_mid_dma_ctl_lo ctl_lo;
+	union intel_mid_dma_ctl_hi ctl_hi;
+	int i;
+
+	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
+	mids = midc->chan.private;
+
+	lli_bloc_desc = desc->lli;
+	lli_next = desc->lli_phys;
 
+	ctl_lo.ctl_lo = desc->ctl_lo;
+	ctl_hi.ctl_hi = desc->ctl_hi;
+	for_each_sg(sglist, sg, sglen, i) {
+		/*Populate CTL_LOW and LLI values*/
+		if (i != sglen - 1) {
+			lli_next = lli_next +
+				sizeof(struct intel_mid_dma_lli);
+		} else {
+			/*Check for circular list, otherwise terminate LLI to ZERO*/
+			if (flags & DMA_PREP_CIRCULAR_LIST) {
+				pr_debug("MDMA: LLI is configured in circular mode\n");
+				lli_next = desc->lli_phys;
+			} else {
+				lli_next = 0;
+				ctl_lo.ctlx.llp_dst_en = 0;
+				ctl_lo.ctlx.llp_src_en = 0;
+			}
+		}
+		/*Populate CTL_HI values*/
+		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+						desc->width,
+						midc->dma->block_size);
+		/*Populate SAR and DAR values*/
+		sg_phy_addr = sg_phys(sg);
+		if (desc->dirn == DMA_TO_DEVICE) {
+			lli_bloc_desc->sar = sg_phy_addr;
+			lli_bloc_desc->dar = mids->per_addr;
+		} else if (desc->dirn == DMA_FROM_DEVICE) {
+			lli_bloc_desc->sar = mids->per_addr;
+			lli_bloc_desc->dar = sg_phy_addr;
+		}
+		/*Copy values into block descriptor in system memory*/
+		lli_bloc_desc->llp = lli_next;
+		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
+		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
+
+		lli_bloc_desc++;
+	}
+	/*Copy very first LLI values to descriptor*/
+	desc->ctl_lo = desc->lli->ctl_lo;
+	desc->ctl_hi = desc->lli->ctl_hi;
+	desc->sar = desc->lli->sar;
+	desc->dar = desc->lli->dar;
+
+	return 0;
+}
 /*****************************************************************************
 DMA engine callback Functions*/
 /**
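The LLP chaining in midc_lli_fill_sg() is plain address arithmetic over the single pool allocation: item i points at item i + 1, and the tail either wraps back to desc->lli_phys (DMA_PREP_CIRCULAR_LIST) or terminates with LLP = 0 and the llp_src_en/llp_dst_en bits cleared. A standalone sketch of the same computation, with a hypothetical helper name, equivalent to the running lli_next update above:

static dma_addr_t lli_next_llp(dma_addr_t lli_phys, unsigned int i,
				unsigned int sglen, unsigned int flags)
{
	if (i != sglen - 1)	/* middle of the chain: next array slot */
		return lli_phys + (i + 1) * sizeof(struct intel_mid_dma_lli);
	if (flags & DMA_PREP_CIRCULAR_LIST)
		return lli_phys;	/* tail wraps to the head */
	return 0;			/* linear list: terminate */
}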
@@ -350,12 +442,12 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	desc->txd.cookie = cookie;
 
 
-	if (list_empty(&midc->active_list)) {
-		midc_dostart(midc, desc);
+	if (list_empty(&midc->active_list))
 		list_add_tail(&desc->desc_node, &midc->active_list);
-	} else {
+	else
 		list_add_tail(&desc->desc_node, &midc->queue);
-	}
+
+	midc_dostart(midc, desc);
 	spin_unlock_bh(&midc->lock);
 
 	return cookie;
@@ -429,7 +521,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
 	struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
 	struct middma_device *mid = to_middma_device(chan->device);
 	struct intel_mid_dma_desc *desc, *_desc;
-	LIST_HEAD(list);
+	union intel_mid_dma_cfg_lo cfg_lo;
 
 	if (cmd != DMA_TERMINATE_ALL)
 		return -ENXIO;
@@ -439,39 +531,29 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
 		spin_unlock_bh(&midc->lock);
 		return 0;
 	}
-	list_splice_init(&midc->free_list, &list);
-	midc->descs_allocated = 0;
-	midc->slave = NULL;
-
+	/*Suspend and disable the channel*/
+	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
+	cfg_lo.cfgx.ch_susp = 1;
+	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
+	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+	midc->busy = false;
 	/* Disable interrupts */
 	disable_dma_interrupt(midc);
+	midc->descs_allocated = 0;
+	midc->slave = NULL;
 
 	spin_unlock_bh(&midc->lock);
-	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
-		pr_debug("MDMA: freeing descriptor %p\n", desc);
-		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
+	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
+		if (desc->lli != NULL) {
+			pci_pool_free(desc->lli_pool, desc->lli,
+							desc->lli_phys);
+			pci_pool_destroy(desc->lli_pool);
+		}
+		list_move(&desc->desc_node, &midc->free_list);
 	}
 	return 0;
 }
 
-/**
- * intel_mid_dma_prep_slave_sg - Prep slave sg txn
- * @chan: chan for DMA transfer
- * @sgl: scatter gather list
- * @sg_len: length of sg txn
- * @direction: DMA transfer dirtn
- * @flags: DMA flags
- *
- * Do DMA sg txn: NOT supported now
- */
-static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
-			struct dma_chan *chan, struct scatterlist *sgl,
-			unsigned int sg_len, enum dma_data_direction direction,
-			unsigned long flags)
-{
-	/*not supported now*/
-	return NULL;
-}
-
 /**
  * intel_mid_dma_prep_memcpy - Prep memcpy txn
@@ -553,6 +635,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 
 	/*calculate CTL_HI*/
 	ctl_hi.ctlx.reser = 0;
+	ctl_hi.ctlx.done = 0;
 	width = mids->src_width;
 
 	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
@@ -599,6 +682,9 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 	desc->ctl_hi = ctl_hi.ctl_hi;
 	desc->width = width;
 	desc->dirn = mids->dirn;
+	desc->lli_phys = 0;
+	desc->lli = NULL;
+	desc->lli_pool = NULL;
 	return &desc->txd;
 
 err_desc_get:
@@ -606,6 +692,85 @@ err_desc_get:
 	midc_desc_put(midc, desc);
 	return NULL;
 }
+/**
+ * intel_mid_dma_prep_slave_sg - Prep slave sg txn
+ * @chan: chan for DMA transfer
+ * @sgl: scatter gather list
+ * @sg_len: length of sg txn
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ *
+ * Prepares LLI based peripheral transfer
+ */
+static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
+			struct dma_chan *chan, struct scatterlist *sgl,
+			unsigned int sg_len, enum dma_data_direction direction,
+			unsigned long flags)
+{
+	struct intel_mid_dma_chan *midc = NULL;
+	struct intel_mid_dma_slave *mids = NULL;
+	struct intel_mid_dma_desc *desc = NULL;
+	struct dma_async_tx_descriptor *txd = NULL;
+	union intel_mid_dma_ctl_lo ctl_lo;
+
+	pr_debug("MDMA: Prep for slave SG\n");
+
+	if (!sg_len) {
+		pr_err("MDMA: Invalid SG length\n");
+		return NULL;
+	}
+	midc = to_intel_mid_dma_chan(chan);
+	BUG_ON(!midc);
+
+	mids = chan->private;
+	BUG_ON(!mids);
+
+	if (!midc->dma->pimr_mask) {
+		pr_debug("MDMA: SG list is not supported by this controller\n");
+		return NULL;
+	}
+
+	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
+			sg_len, direction, flags);
+
+	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+	if (NULL == txd) {
+		pr_err("MDMA: Prep memcpy failed\n");
+		return NULL;
+	}
+	desc = to_intel_mid_dma_desc(txd);
+	desc->dirn = direction;
+	ctl_lo.ctl_lo = desc->ctl_lo;
+	ctl_lo.ctlx.llp_dst_en = 1;
+	ctl_lo.ctlx.llp_src_en = 1;
+	desc->ctl_lo = ctl_lo.ctl_lo;
+	desc->lli_length = sg_len;
+	desc->current_lli = 0;
+	/* DMA coherent memory pool for LLI descriptors*/
+	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
+					midc->dma->pdev,
+					(sizeof(struct intel_mid_dma_lli)*sg_len),
+					32, 0);
+	if (NULL == desc->lli_pool) {
+		pr_err("MID_DMA: LLI pool create failed\n");
+		return NULL;
+	}
+
+	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
+	if (!desc->lli) {
+		pr_err("MID_DMA: LLI alloc failed\n");
+		pci_pool_destroy(desc->lli_pool);
+		return NULL;
+	}
+
+	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
+	if (flags & DMA_PREP_INTERRUPT) {
+		iowrite32(UNMASK_INTR_REG(midc->ch_id),
+				midc->dma_base + MASK_BLOCK);
+		pr_debug("MDMA:Enabled Block interrupt\n");
+	}
+	return &desc->txd;
+}
 
 /**
  * intel_mid_dma_free_chan_resources - Frees dma resources
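A peripheral driver reaches this path through the standard dmaengine API of this kernel generation. A hedged usage sketch follows; the callback name and error handling are illustrative only, and chan->private is assumed to already point at a configured struct intel_mid_dma_slave (see include/linux/intel_mid_dma.h):

/* Hypothetical client code: prepare and submit an SG transfer. */
static void my_xfer_complete(void *arg);	/* hypothetical completion hook */

static dma_cookie_t start_slave_sg(struct dma_chan *chan,
				struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
			DMA_TO_DEVICE,
			DMA_PREP_INTERRUPT | DMA_PREP_CIRCULAR_LIST);
	if (!txd)
		return -EBUSY;
	txd->callback = my_xfer_complete;
	txd->callback_param = NULL;
	return txd->tx_submit(txd);
}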
@@ -728,7 +893,7 @@ static void dma_tasklet(unsigned long data)
 {
 	struct middma_device *mid = NULL;
 	struct intel_mid_dma_chan *midc = NULL;
-	u32 status;
+	u32 status, raw_tfr, raw_block;
 	int i;
 
 	mid = (struct middma_device *)data;
@@ -737,8 +902,9 @@ static void dma_tasklet(unsigned long data)
 		return;
 	}
 	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
-	status = ioread32(mid->dma_base + RAW_TFR);
-	pr_debug("MDMA:RAW_TFR %x\n", status);
+	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
+	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
+	status = raw_tfr | raw_block;
 	status &= mid->intr_mask;
 	while (status) {
 		/*txn interrupt*/
@@ -754,15 +920,23 @@ static void dma_tasklet(unsigned long data)
 		}
 		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
 				status, midc->ch_id, i);
+		midc->raw_tfr = raw_tfr;
+		midc->raw_block = raw_block;
+		spin_lock_bh(&midc->lock);
 		/*clearing this interrupts first*/
 		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
-		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK);
-
-		spin_lock_bh(&midc->lock);
+		if (raw_block) {
+			iowrite32((1 << midc->ch_id),
+				mid->dma_base + CLEAR_BLOCK);
+		}
 		midc_scan_descriptors(mid, midc);
 		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
 		iowrite32(UNMASK_INTR_REG(midc->ch_id),
 				mid->dma_base + MASK_TFR);
+		if (raw_block) {
+			iowrite32(UNMASK_INTR_REG(midc->ch_id),
+					mid->dma_base + MASK_BLOCK);
+		}
 		spin_unlock_bh(&midc->lock);
 	}
 
@@ -836,7 +1010,8 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
 	tfr_status &= mid->intr_mask;
 	if (tfr_status) {
 		/*need to disable intr*/
-		iowrite32((tfr_status << 8), mid->dma_base + MASK_TFR);
+		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
+		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
 		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
 		call_tasklet = 1;
 	}
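The shift by INT_MASK_WE relies on the mask-register layout this driver targets: bits 7:0 are the per-channel INT_MASK bits (1 = unmasked, as the UNMASK_INTR_REG macro implies) and bits 15:8 the corresponding write-enable bits, so a write that sets only the upper byte masks those channels without disturbing the others. A sketch of the two patterns used above, as hypothetical helpers:

static inline u32 chan_mask_val(u32 ch_bits)
{
	/* WE set, INT_MASK bit clear: mask these channels */
	return ch_bits << INT_MASK_WE;
}

static inline u32 chan_unmask_val(u32 ch_no)
{
	/* WE and INT_MASK bit both set: unmask this channel */
	return UNMASK_INTR_REG(ch_no);
}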
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index a12dd2572dc3..7a5ac56d1324 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -29,11 +29,12 @@
 #include <linux/dmapool.h>
 #include <linux/pci_ids.h>
 
-#define INTEL_MID_DMA_DRIVER_VERSION "1.0.6"
+#define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
 
 #define REG_BIT0		0x00000001
 #define REG_BIT8		0x00000100
-
+#define INT_MASK_WE		0x8
+#define CLEAR_DONE		0xFFFFEFFF
 #define UNMASK_INTR_REG(chan_num) \
 	((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
 #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
@@ -41,6 +42,9 @@
 #define ENABLE_CHANNEL(chan_num) \
 	((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
 
+#define DISABLE_CHANNEL(chan_num) \
+	(REG_BIT8 << chan_num)
+
 #define DESCS_PER_CHANNEL	16
 /*DMA Registers*/
 /*registers associated with channel programming*/
@@ -50,6 +54,7 @@
 /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
 #define SAR			0x00 /* Source Address Register*/
 #define DAR			0x08 /* Destination Address Register*/
+#define LLP			0x10 /* Linked List Pointer Register*/
 #define CTL_LOW			0x18 /* Control Register*/
 #define CTL_HIGH		0x1C /* Control Register*/
 #define CFG_LOW			0x40 /* Configuration Register Low*/
@@ -112,8 +117,8 @@ union intel_mid_dma_ctl_lo {
 union intel_mid_dma_ctl_hi {
 	struct {
 		u32 block_ts:12;	/*block transfer size*/
-		/*configured by DMAC*/
-		u32 reser:20;
+		u32 done:1;		/*Done - updated by DMAC*/
+		u32 reser:19;		/*configured by DMAC*/
 	} ctlx;
 	u32 ctl_hi;
 
@@ -169,6 +174,8 @@ union intel_mid_dma_cfg_hi {
  * @dma: dma device struture pointer
  * @busy: bool representing if ch is busy (active txn) or not
  * @in_use: bool representing if ch is in use or not
+ * @raw_tfr: raw tfr interrupt received
+ * @raw_block: raw block interrupt received
  */
 struct intel_mid_dma_chan {
 	struct dma_chan		chan;
@@ -185,6 +192,8 @@ struct intel_mid_dma_chan {
 	struct middma_device	*dma;
 	bool			busy;
 	bool			in_use;
+	u32			raw_tfr;
+	u32			raw_block;
 };
 
 static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
@@ -247,6 +256,11 @@ struct intel_mid_dma_desc {
 	u32			cfg_lo;
 	u32			ctl_lo;
 	u32			ctl_hi;
+	struct pci_pool		*lli_pool;
+	struct intel_mid_dma_lli	*lli;
+	dma_addr_t		lli_phys;
+	unsigned int		lli_length;
+	unsigned int		current_lli;
 	dma_addr_t		next;
 	enum dma_data_direction	dirn;
 	enum dma_status		status;
@@ -255,6 +269,14 @@ struct intel_mid_dma_desc {
 
 };
 
+struct intel_mid_dma_lli {
+	dma_addr_t			sar;
+	dma_addr_t			dar;
+	dma_addr_t			llp;
+	u32				ctl_lo;
+	u32				ctl_hi;
+} __attribute__ ((packed));
+
 static inline int test_ch_en(void __iomem *dma, u32 ch_no)
 {
 	u32 en_reg = ioread32(dma + DMA_CHAN_EN);
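The new struct intel_mid_dma_lli mirrors, in order, the per-channel registers the controller reloads on every LLP fetch (SAR at 0x00, DAR at 0x08, LLP at 0x10, CTL_LOW at 0x18, CTL_HIGH at 0x1C); the packed attribute keeps the in-memory image free of compiler padding so the hardware finds each word where it expects it. A hypothetical debug helper, not part of the patch, that walks a chain the way the controller will:

static void dump_lli_chain(struct intel_mid_dma_desc *desc)
{
	unsigned int i;

	for (i = 0; i < desc->lli_length; i++) {
		struct intel_mid_dma_lli *lli = desc->lli + i;

		pr_debug("LLI[%u]: sar 0x%llx dar 0x%llx llp 0x%llx "
			"ctl_hi %08x ctl_lo %08x\n", i,
			(unsigned long long)lli->sar,
			(unsigned long long)lli->dar,
			(unsigned long long)lli->llp,
			lli->ctl_hi, lli->ctl_lo);
	}
}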
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h
index d9d08b6269b6..befe3fbd9e28 100644
--- a/include/linux/intel_mid_dma.h
+++ b/include/linux/intel_mid_dma.h
@@ -27,6 +27,7 @@
 
 #include <linux/dmaengine.h>
 
+#define DMA_PREP_CIRCULAR_LIST		(1 << 10)
 /*DMA transaction width, src and dstn width would be same
 The DMA length must be width aligned,
 for 32 bit width the length must be 32 bit (4bytes) aligned only*/
@@ -69,6 +70,7 @@ enum intel_mid_dma_msize {
 * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
 * @src_msize: Source DMA burst size
 * @dst_msize: Dst DMA burst size
+ * @per_addr: Peripheral address
 * @device_instance: DMA peripheral device instance, we can have multiple
 *		peripheral device connected to single DMAC
 */
@@ -80,6 +82,7 @@ struct intel_mid_dma_slave {
 	enum intel_mid_dma_mode	cfg_mode; /*mode configuration*/
 	enum intel_mid_dma_msize src_msize; /*size if src burst*/
 	enum intel_mid_dma_msize dst_msize; /*size of dst burst*/
+	dma_addr_t		per_addr; /*Peripheral address*/
 	unsigned int		device_instance; /*0, 1 for periphral instance*/
 };
 
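per_addr is the address midc_lli_fill_sg() writes into SAR or DAR on the peripheral side of each block. A sketch of how a client might fill the slave structure before preparing a transfer; the FIFO address and burst sizes are made-up values, and the LNW_DMA_* enum names are assumed to be the ones defined earlier in this header:

static struct intel_mid_dma_slave mid_slave = {
	.dirn		= DMA_TO_DEVICE,
	.src_width	= LNW_DMA_WIDTH_32BIT,
	.dst_width	= LNW_DMA_WIDTH_32BIT,
	.cfg_mode	= LNW_DMA_MEM_TO_PER,
	.src_msize	= LNW_DMA_MSIZE_16,
	.dst_msize	= LNW_DMA_MSIZE_16,
	.per_addr	= 0xffae8000,	/* hypothetical device FIFO */
	.device_instance = 0,
};

/* attach to the channel before calling device_prep_slave_sg() */
chan->private = &mid_slave;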