author     Dan Williams <dan.j.williams@intel.com>   2009-09-08 15:00:55 -0400
committer  Dan Williams <dan.j.williams@intel.com>   2009-09-08 20:30:23 -0400
commit     6df9183a153291a2585a8dfe67597fc18c201147 (patch)
tree       5e5f3b3da9308e20f2dda71c85242460bb7cacfa /drivers/dma/ioat/dma.c
parent     38e12f64a165e83617c21dae3c15972fd8d639f5 (diff)
ioat: add some dev_dbg() calls
Provide some output for debugging the driver.
Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/ioat/dma.c')
-rw-r--r--   drivers/dma/ioat/dma.c   29
1 files changed, 26 insertions, 3 deletions
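
The hunks below call three helpers that are not defined in dma.c itself: set_desc_id(), desc_id() and dump_desc_dbg() come from the companion drivers/dma/ioat/dma.h change, which is outside this diffstat. A minimal sketch of what such helpers could look like, assuming a debug-only ->id field on struct ioat_desc_sw and reusing the driver's existing to_dev() accessor (the real definitions in dma.h may differ):

/*
 * Hypothetical sketch only -- the real helpers live in drivers/dma/ioat/dma.h.
 * Assumes the driver's existing struct ioat_dma_chan / struct ioat_desc_sw
 * types plus an ->id field that is only meaningful in debug builds.
 */
#ifdef DEBUG
#define set_desc_id(desc, i)    ((desc)->id = (i))
#define desc_id(desc)           ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc)           (0)
#endif

static inline void dump_desc_dbg(struct ioat_dma_chan *ioat,
                                 struct ioat_desc_sw *desc)
{
        /* one-line summary of a software descriptor and its hw chain link */
        dev_dbg(to_dev(&ioat->base), "desc[%d]: (%#llx->%#llx) cookie: %d\n",
                desc_id(desc),
                (unsigned long long) desc->txd.phys,
                (unsigned long long) desc->hw->next,
                desc->txd.cookie);
}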
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 696d4de3bb8f..edf4f5e5de73 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -134,6 +134,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
         dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
         xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
         xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
+        dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
 
 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
         if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
@@ -167,6 +168,8 @@ __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
 {
         void __iomem *reg_base = ioat->base.reg_base;
 
+        dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
+                __func__, ioat->pending);
         ioat->pending = 0;
         writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
 }
@@ -251,6 +254,7 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
         if (!ioat->used_desc.prev)
                 return;
 
+        dev_dbg(to_dev(chan), "%s\n", __func__);
         chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
         chansts = (chan->completion_virt->low
                    & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
@@ -382,6 +386,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
                 cookie = 1;
         c->cookie = cookie;
         tx->cookie = cookie;
+        dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
 
         /* write address into NextDescriptor field of last desc in chain */
         first = to_ioat_desc(tx->tx_list.next);
@@ -390,6 +395,8 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
         wmb();
         chain_tail->hw->next = first->txd.phys;
         list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
+        dump_desc_dbg(ioat, chain_tail);
+        dump_desc_dbg(ioat, first);
 
         ioat->pending += desc->tx_cnt;
         if (ioat->pending >= ioat_pending_level)
@@ -429,6 +436,7 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
         desc_sw->txd.tx_submit = ioat1_tx_submit;
         desc_sw->hw = desc;
         desc_sw->txd.phys = phys;
+        set_desc_id(desc_sw, -1);
 
         return desc_sw;
 }
@@ -474,6 +482,7 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
                         dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
                         break;
                 }
+                set_desc_id(desc, i);
                 list_add_tail(&desc->node, &tmp_list);
         }
         spin_lock_bh(&ioat->desc_lock);
@@ -495,6 +504,8 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
 
         tasklet_enable(&chan->cleanup_task);
         ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
+        dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
+                __func__, ioat->desccount);
         return ioat->desccount;
 }
 
@@ -527,8 +538,10 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
         mdelay(100);
 
         spin_lock_bh(&ioat->desc_lock);
-        list_for_each_entry_safe(desc, _desc,
-                                 &ioat->used_desc, node) {
+        list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
+                dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
+                        __func__, desc_id(desc));
+                dump_desc_dbg(ioat, desc);
                 in_use_descs++;
                 list_del(&desc->node);
                 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
@@ -585,7 +598,8 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
                         return NULL;
                 }
         }
-
+        dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
+                __func__, desc_id(new));
         prefetch(new->hw);
         return new;
 }
@@ -630,6 +644,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
                         async_tx_ack(&desc->txd);
                         next = ioat1_dma_get_next_descriptor(ioat);
                         hw->next = next ? next->txd.phys : 0;
+                        dump_desc_dbg(ioat, desc);
                         desc = next;
                 } else
                         hw->next = 0;
@@ -652,6 +667,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
         list_splice(&chain, &desc->txd.tx_list);
         hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
         hw->ctl_f.compl_write = 1;
+        dump_desc_dbg(ioat, desc);
 
         return &desc->txd;
 }
@@ -707,6 +723,9 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
         phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
 #endif
 
+        dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+                (unsigned long long) phys_complete);
+
         if ((chan->completion_virt->full
              & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
             IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
@@ -758,6 +777,8 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
                 return;
         }
 
+        dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
+                __func__, phys_complete);
         list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
                 tx = &desc->txd;
                 /*
@@ -765,6 +786,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
                  * due to exceeding xfercap, perhaps. If so, only the
                  * last one will have a cookie, and require unmapping.
                  */
+                dump_desc_dbg(ioat, desc);
                 if (tx->cookie) {
                         cookie = tx->cookie;
                         ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
@@ -848,6 +870,7 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
         async_tx_ack(&desc->txd);
         hw->next = 0;
         list_add_tail(&desc->node, &ioat->used_desc);
+        dump_desc_dbg(ioat, desc);
 
         writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
                chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
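
Note that dev_dbg() is compiled away in a normal build; the messages added here only appear when the file is built with DEBUG defined, or when CONFIG_DYNAMIC_DEBUG is enabled and the call sites are switched on at runtime. As an illustration (not part of this patch), the build-time way to turn them on is to define DEBUG ahead of the includes in drivers/dma/ioat/dma.c:

/* Not part of this patch: defining DEBUG before the includes makes
 * dev_dbg() in this translation unit expand to a real dev_printk(),
 * so the messages added above reach the kernel log.  With
 * CONFIG_DYNAMIC_DEBUG the same call sites can instead be enabled
 * selectively at runtime via debugfs.
 */
#define DEBUG
#include <linux/init.h>
#include <linux/module.h>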