path: root/drivers/dma
author    Dan Williams <dan.j.williams@intel.com>  2009-09-08 15:00:55 -0400
committer Dan Williams <dan.j.williams@intel.com>  2009-09-08 20:30:23 -0400
commit    6df9183a153291a2585a8dfe67597fc18c201147 (patch)
tree      5e5f3b3da9308e20f2dda71c85242460bb7cacfa /drivers/dma
parent    38e12f64a165e83617c21dae3c15972fd8d639f5 (diff)
ioat: add some dev_dbg() calls
Provide some output for debugging the driver.

Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
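These messages are compiled out by default: dev_dbg() only emits when the file is built with DEBUG defined (for instance via `ccflags-y += -DDEBUG` in the driver's Makefile) or when the kernel has CONFIG_DYNAMIC_DEBUG enabled, in which case they can be switched on at runtime, e.g. `echo 'module ioatdma +p' > /sys/kernel/debug/dynamic_debug/control` (assuming the driver's usual module name, ioatdma). The same DEBUG define gates the new per-descriptor id bookkeeping introduced in dma.h below.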
Diffstat (limited to 'drivers/dma')
 drivers/dma/ioat/dma.c    | 29 ++++++++++++++++++++++++++---
 drivers/dma/ioat/dma.h    | 28 ++++++++++++++++++++++++++++
 drivers/dma/ioat/dma_v2.c | 25 ++++++++++++++++++++++++-
 drivers/dma/ioat/dma_v2.h |  3 +++
 4 files changed, 81 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 696d4de3bb8f..edf4f5e5de73 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -134,6 +134,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
 	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
 	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
 	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
+	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
 
 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
 	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
@@ -167,6 +168,8 @@ __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
 {
 	void __iomem *reg_base = ioat->base.reg_base;
 
+	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
+		__func__, ioat->pending);
 	ioat->pending = 0;
 	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
 }
@@ -251,6 +254,7 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
 	if (!ioat->used_desc.prev)
 		return;
 
+	dev_dbg(to_dev(chan), "%s\n", __func__);
 	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
 	chansts = (chan->completion_virt->low
 		   & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
@@ -382,6 +386,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 		cookie = 1;
 	c->cookie = cookie;
 	tx->cookie = cookie;
+	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
 
 	/* write address into NextDescriptor field of last desc in chain */
 	first = to_ioat_desc(tx->tx_list.next);
@@ -390,6 +395,8 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	wmb();
 	chain_tail->hw->next = first->txd.phys;
 	list_splice_tail_init(&tx->tx_list, &ioat->used_desc);
+	dump_desc_dbg(ioat, chain_tail);
+	dump_desc_dbg(ioat, first);
 
 	ioat->pending += desc->tx_cnt;
 	if (ioat->pending >= ioat_pending_level)
@@ -429,6 +436,7 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
 	desc_sw->txd.tx_submit = ioat1_tx_submit;
 	desc_sw->hw = desc;
 	desc_sw->txd.phys = phys;
+	set_desc_id(desc_sw, -1);
 
 	return desc_sw;
 }
@@ -474,6 +482,7 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
 			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
 			break;
 		}
+		set_desc_id(desc, i);
 		list_add_tail(&desc->node, &tmp_list);
 	}
 	spin_lock_bh(&ioat->desc_lock);
@@ -495,6 +504,8 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
 
 	tasklet_enable(&chan->cleanup_task);
 	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
+	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
+		__func__, ioat->desccount);
 	return ioat->desccount;
 }
 
@@ -527,8 +538,10 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
 	mdelay(100);
 
 	spin_lock_bh(&ioat->desc_lock);
-	list_for_each_entry_safe(desc, _desc,
-				 &ioat->used_desc, node) {
+	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
+		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
+			__func__, desc_id(desc));
+		dump_desc_dbg(ioat, desc);
 		in_use_descs++;
 		list_del(&desc->node);
 		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
@@ -585,7 +598,8 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
 			return NULL;
 		}
 	}
-
+	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
+		__func__, desc_id(new));
 	prefetch(new->hw);
 	return new;
 }
@@ -630,6 +644,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
 			async_tx_ack(&desc->txd);
 			next = ioat1_dma_get_next_descriptor(ioat);
 			hw->next = next ? next->txd.phys : 0;
+			dump_desc_dbg(ioat, desc);
 			desc = next;
 		} else
 			hw->next = 0;
@@ -652,6 +667,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
 	list_splice(&chain, &desc->txd.tx_list);
 	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
 	hw->ctl_f.compl_write = 1;
+	dump_desc_dbg(ioat, desc);
 
 	return &desc->txd;
 }
@@ -707,6 +723,9 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
 	phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
 #endif
 
+	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+		(unsigned long long) phys_complete);
+
 	if ((chan->completion_virt->full
 	    & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
 			IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
@@ -758,6 +777,8 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 		return;
 	}
 
+	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
+		__func__, phys_complete);
 	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
 		tx = &desc->txd;
 		/*
@@ -765,6 +786,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 		 * due to exceeding xfercap, perhaps. If so, only the
 		 * last one will have a cookie, and require unmapping.
 		 */
+		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
 			cookie = tx->cookie;
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
@@ -848,6 +870,7 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
 	async_tx_ack(&desc->txd);
 	hw->next = 0;
 	list_add_tail(&desc->node, &ioat->used_desc);
+	dump_desc_dbg(ioat, desc);
 
 	writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
 	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index fa15e77652a0..9f9edc2cd079 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -173,6 +173,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
  * or attached to a transaction list (async_tx.tx_list)
  * @tx_cnt: number of descriptors required to complete the transaction
  * @txd: the generic software descriptor for all engines
+ * @id: identifier for debug
  */
 struct ioat_desc_sw {
 	struct ioat_dma_descriptor *hw;
@@ -180,8 +181,35 @@ struct ioat_desc_sw {
 	int tx_cnt;
 	size_t len;
 	struct dma_async_tx_descriptor txd;
+	#ifdef DEBUG
+	int id;
+	#endif
 };
 
+#ifdef DEBUG
+#define set_desc_id(desc, i) ((desc)->id = (i))
+#define desc_id(desc) ((desc)->id)
+#else
+#define set_desc_id(desc, i)
+#define desc_id(desc) (0)
+#endif
+
+static inline void
+__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
+		struct dma_async_tx_descriptor *tx, int id)
+{
+	struct device *dev = to_dev(chan);
+
+	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
+		" ctl: %#x (op: %d int_en: %d compl: %d)\n", id,
+		(unsigned long long) tx->phys,
+		(unsigned long long) hw->next, tx->cookie, tx->flags,
+		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
+}
+
+#define dump_desc_dbg(c, d) \
+	({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
+
 static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
 {
 #ifdef CONFIG_NET_DMA
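A note on the helpers above: set_desc_id()/desc_id() and the id field compile away entirely when DEBUG is unset, so the bookkeeping is free in production builds, and dump_desc_dbg() is written as a GCC statement expression (`({ ...; 0; })`) so it evaluates to 0 and quietly tolerates a NULL descriptor. Below is a minimal stand-alone sketch of the same zero-cost pattern; the identifiers are illustrative, not part of the driver:

/* sketch.c - build with `gcc -DDEBUG sketch.c` and again without
 * -DDEBUG to watch the id bookkeeping compile away entirely. */
#include <stdio.h>

struct desc {
	void *hw;
#ifdef DEBUG
	int id;				/* exists only in debug builds */
#endif
};

#ifdef DEBUG
#define set_desc_id(d, i)	((d)->id = (i))
#define desc_id(d)		((d)->id)
#else
#define set_desc_id(d, i)	/* expands to nothing */
#define desc_id(d)		(0)
#endif

int main(void)
{
	struct desc d = { .hw = NULL };

	set_desc_id(&d, 42);			/* no-op without -DDEBUG */
	printf("desc id: %d\n", desc_id(&d));	/* 42 with -DDEBUG, else 0 */
	return 0;
}

The statement-expression form is a GCC extension, which is fine for kernel code; a strictly portable variant would wrap the body in do { } while (0) at the cost of no longer being usable in expression context.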
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 49ba1c73d95e..58881860f400 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -54,7 +54,9 @@ static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
 	/* make descriptor updates globally visible before notifying channel */
 	wmb();
 	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-
+	dev_dbg(to_dev(&ioat->base),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
 }
 
 static void ioat2_issue_pending(struct dma_chan *chan)
@@ -101,6 +103,8 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
 		return;
 	}
 
+	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat->head, ioat->tail, ioat->issued);
 	idx = ioat2_desc_alloc(ioat, 1);
 	desc = ioat2_get_ring_ent(ioat, idx);
 
@@ -118,6 +122,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
 	       reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
 	writel(((u64) desc->txd.phys) >> 32,
 	       reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+	dump_desc_dbg(ioat, desc);
 	__ioat2_issue_pending(ioat);
 }
 
@@ -154,6 +159,10 @@ static void ioat2_reset_part2(struct work_struct *work)
 	ioat->issued = ioat->tail;
 	ioat->dmacount = 0;
 
+	dev_dbg(to_dev(&ioat->base),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
+
 	if (ioat2_ring_pending(ioat)) {
 		struct ioat_ring_ent *desc;
 
@@ -221,6 +230,8 @@ static void ioat2_chan_watchdog(struct work_struct *work)
 	u16 active;
 	int i;
 
+	dev_dbg(&device->pdev->dev, "%s\n", __func__);
+
 	for (i = 0; i < device->common.chancnt; i++) {
 		chan = ioat_chan_by_index(device, i);
 		ioat = container_of(chan, struct ioat2_dma_chan, base);
@@ -295,11 +306,15 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 
 	spin_lock_bh(&ioat->ring_lock);
 
+	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat->head, ioat->tail, ioat->issued);
+
 	active = ioat2_ring_active(ioat);
 	for (i = 0; i < active && !seen_current; i++) {
 		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
 		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
 		tx = &desc->txd;
+		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			chan->completed_cookie = tx->cookie;
@@ -348,6 +363,7 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device)
 	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
 	if (xfercap_log == 0)
 		return 0;
+	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
 
 	/* FIXME which i/oat version is i7300? */
 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
@@ -381,6 +397,8 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
 		cookie = 1;
 	tx->cookie = cookie;
 	c->cookie = cookie;
+	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
+
 	ioat2_update_pending(ioat);
 	spin_unlock_bh(&ioat->ring_lock);
 
@@ -480,6 +498,7 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c)
 			kfree(ring);
 			return -ENOMEM;
 		}
+		set_desc_id(ring[i], i);
 	}
 
 	/* link descs */
@@ -571,12 +590,14 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
 		len -= copy;
 		dst += copy;
 		src += copy;
+		dump_desc_dbg(ioat, desc);
 	}
 
 	desc->txd.flags = flags;
 	desc->len = total_len;
 	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
 	hw->ctl_f.compl_write = 1;
+	dump_desc_dbg(ioat, desc);
 	/* we leave the channel locked to ensure in order submission */
 
 	return &desc->txd;
@@ -614,6 +635,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c)
 
 	spin_lock_bh(&ioat->ring_lock);
 	descs = ioat2_ring_space(ioat);
+	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
 	for (i = 0; i < descs; i++) {
 		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
 		ioat2_free_ring_ent(desc, c);
@@ -625,6 +647,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c)
 
 	for (i = 0; i < total_descs - descs; i++) {
 		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+		dump_desc_dbg(ioat, desc);
 		ioat2_free_ring_ent(desc, c);
 	}
 
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 94a553eacdbd..c72ccb5dfd5b 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -116,6 +116,9 @@ struct ioat_ring_ent {
 	struct ioat_dma_descriptor *hw;
 	struct dma_async_tx_descriptor txd;
 	size_t len;
+	#ifdef DEBUG
+	int id;
+	#endif
 };
 
 static inline struct ioat_ring_ent *
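One design note on why dump_desc_dbg(ioat, desc) works for both channel generations: ioat_dma_chan and ioat2_dma_chan each embed a struct ioat_chan_common member named base (see the container_of(chan, struct ioat2_dma_chan, base) call in dma_v2.c above), so the macro's `&c->base` resolves for either type. A hedged sketch of the idiom, with illustrative names only:

/* Two channel types embed a common base; a macro that takes the outer
 * object reaches the shared member regardless of the outer type. */
struct chan_common { int chan_id; };
struct chan_v1 { struct chan_common base; int v1_state; };
struct chan_v2 { struct chan_common base; int v2_state; };

static void debug_common(struct chan_common *c) { (void)c->chan_id; }

#define debug_chan(c)	debug_common(&(c)->base)	/* v1 or v2 */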