-rw-r--r--	drivers/dma/ioat/dma.c	| 304
-rw-r--r--	drivers/dma/ioat/dma.h	|   7
2 files changed, 144 insertions(+), 167 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 65f8b7492a4d..462dae627191 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -55,9 +55,8 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
 static struct ioat_desc_sw *
 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
 
-static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
-						struct ioatdma_device *device,
-						int index)
+static inline struct ioat_dma_chan *
+ioat_chan_by_index(struct ioatdma_device *device, int index)
 {
 	return device->idx[index];
 }
@@ -87,7 +86,7 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
 
 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
 	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
-		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
+		ioat_chan = ioat_chan_by_index(instance, bit);
 		tasklet_schedule(&ioat_chan->cleanup_task);
 	}
 
@@ -205,8 +204,8 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
  * descriptors to hw
  * @chan: DMA channel handle
  */
-static inline void __ioat1_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan)
+static inline void
+__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
 {
 	ioat_chan->pending = 0;
 	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
@@ -223,8 +222,8 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
 	}
 }
 
-static inline void __ioat2_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan)
+static inline void
+__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
 {
 	ioat_chan->pending = 0;
 	writew(ioat_chan->dmacount,
@@ -279,18 +278,18 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work)
 	desc = to_ioat_desc(ioat_chan->used_desc.prev);
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
-		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
 		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-		writel(((u64) desc->async_tx.phys) >> 32,
+		writel(((u64) desc->txd.phys) >> 32,
 		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
 
 		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
 			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
 		break;
 	case IOAT_VER_2_0:
-		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
 		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-		writel(((u64) desc->async_tx.phys) >> 32,
+		writel(((u64) desc->txd.phys) >> 32,
 		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
 
 		/* tell the engine to go with what's left to be done */
@@ -299,7 +298,7 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work)
 
 		break;
 	}
-	dev_err(&ioat_chan->device->pdev->dev,
+	dev_err(to_dev(ioat_chan),
 		"chan%d reset - %d descs waiting, %d total desc\n",
 		chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
 
@@ -322,7 +321,7 @@ static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
 	chansts = (ioat_chan->completion_virt->low
 					& IOAT_CHANSTS_DMA_TRANSFER_STATUS);
 	if (chanerr) {
-		dev_err(&ioat_chan->device->pdev->dev,
+		dev_err(to_dev(ioat_chan),
 			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
 			chan_num(ioat_chan), chansts, chanerr);
 		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -367,7 +366,7 @@ static void ioat_dma_chan_watchdog(struct work_struct *work)
 	unsigned long compl_desc_addr_hw;
 
 	for (i = 0; i < device->common.chancnt; i++) {
-		ioat_chan = ioat_lookup_chan_by_index(device, i);
+		ioat_chan = ioat_chan_by_index(device, i);
 
 		if (ioat_chan->device->version == IOAT_VER_1_2
 			/* have we started processing anything yet */
@@ -475,7 +474,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	len = first->len;
 	src = first->src;
 	dst = first->dst;
-	orig_flags = first->async_tx.flags;
+	orig_flags = first->txd.flags;
 	new = first;
 
 	spin_lock_bh(&ioat_chan->desc_lock);
@@ -484,7 +483,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	do {
 		copy = min_t(size_t, len, ioat_chan->xfercap);
 
-		async_tx_ack(&new->async_tx);
+		async_tx_ack(&new->txd);
 
 		hw = new->hw;
 		hw->size = copy;
@@ -495,7 +494,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 
 		/* chain together the physical address list for the HW */
 		wmb();
-		prev->hw->next = (u64) new->async_tx.phys;
+		prev->hw->next = (u64) new->txd.phys;
 
 		len -= copy;
 		dst += copy;
@@ -507,27 +506,26 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
 
 	if (!new) {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"tx submit failed\n");
+		dev_err(to_dev(ioat_chan), "tx submit failed\n");
 		spin_unlock_bh(&ioat_chan->desc_lock);
 		return -ENOMEM;
 	}
 
 	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	if (first->async_tx.callback) {
+	if (first->txd.callback) {
 		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
 		if (first != new) {
 			/* move callback into to last desc */
-			new->async_tx.callback = first->async_tx.callback;
-			new->async_tx.callback_param
-					= first->async_tx.callback_param;
-			first->async_tx.callback = NULL;
-			first->async_tx.callback_param = NULL;
+			new->txd.callback = first->txd.callback;
+			new->txd.callback_param
+					= first->txd.callback_param;
+			first->txd.callback = NULL;
+			first->txd.callback_param = NULL;
 		}
 	}
 
 	new->tx_cnt = desc_count;
-	new->async_tx.flags = orig_flags; /* client is in control of this ack */
+	new->txd.flags = orig_flags; /* client is in control of this ack */
 
 	/* store the original values for use in later cleanup */
 	if (new != first) {
@@ -541,11 +539,11 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 	cookie++;
 	if (cookie < 0)
 		cookie = 1;
-	ioat_chan->common.cookie = new->async_tx.cookie = cookie;
+	ioat_chan->common.cookie = new->txd.cookie = cookie;
 
 	/* write address into NextDescriptor field of last desc in chain */
 	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
-							first->async_tx.phys;
+							first->txd.phys;
 	list_splice_tail(&new_chain, &ioat_chan->used_desc);
 
 	ioat_chan->dmacount += desc_count;
@@ -574,7 +572,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	len = first->len;
 	src = first->src;
 	dst = first->dst;
-	orig_flags = first->async_tx.flags;
+	orig_flags = first->txd.flags;
 	new = first;
 
 	/*
@@ -584,7 +582,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	do {
 		copy = min_t(size_t, len, ioat_chan->xfercap);
 
-		async_tx_ack(&new->async_tx);
+		async_tx_ack(&new->txd);
 
 		hw = new->hw;
 		hw->size = copy;
@@ -599,27 +597,26 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
 
 	if (!new) {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"tx submit failed\n");
+		dev_err(to_dev(ioat_chan), "tx submit failed\n");
 		spin_unlock_bh(&ioat_chan->desc_lock);
 		return -ENOMEM;
 	}
 
 	hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	if (first->async_tx.callback) {
+	if (first->txd.callback) {
 		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
 		if (first != new) {
 			/* move callback into to last desc */
-			new->async_tx.callback = first->async_tx.callback;
-			new->async_tx.callback_param
-					= first->async_tx.callback_param;
-			first->async_tx.callback = NULL;
-			first->async_tx.callback_param = NULL;
+			new->txd.callback = first->txd.callback;
+			new->txd.callback_param
+					= first->txd.callback_param;
+			first->txd.callback = NULL;
+			first->txd.callback_param = NULL;
 		}
 	}
 
 	new->tx_cnt = desc_count;
-	new->async_tx.flags = orig_flags; /* client is in control of this ack */
+	new->txd.flags = orig_flags; /* client is in control of this ack */
 
 	/* store the original values for use in later cleanup */
 	if (new != first) {
@@ -633,7 +630,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
 	cookie++;
 	if (cookie < 0)
 		cookie = 1;
-	ioat_chan->common.cookie = new->async_tx.cookie = cookie;
+	ioat_chan->common.cookie = new->txd.cookie = cookie;
 
 	ioat_chan->dmacount += desc_count;
 	ioat_chan->pending += desc_count;
@@ -649,9 +646,8 @@
  * @ioat_chan: the channel supplying the memory pool for the descriptors
  * @flags: allocation flags
  */
-static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
-					struct ioat_dma_chan *ioat_chan,
-					gfp_t flags)
+static struct ioat_desc_sw *
+ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags)
 {
 	struct ioat_dma_descriptor *desc;
 	struct ioat_desc_sw *desc_sw;
@@ -670,19 +666,19 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 	}
 
 	memset(desc, 0, sizeof(*desc));
-	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
+	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat_chan->common);
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
-		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
+		desc_sw->txd.tx_submit = ioat1_tx_submit;
 		break;
 	case IOAT_VER_2_0:
 	case IOAT_VER_3_0:
-		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
+		desc_sw->txd.tx_submit = ioat2_tx_submit;
 		break;
 	}
 
 	desc_sw->hw = desc;
-	desc_sw->async_tx.phys = phys;
+	desc_sw->txd.phys = phys;
 
 	return desc_sw;
 }
@@ -712,9 +708,9 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
 
 	/* circle link the hw descriptors */
 	desc = to_ioat_desc(ioat_chan->free_desc.next);
-	desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+	desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
 	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
-		desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+		desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
 	}
 }
 
@@ -743,8 +739,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 
 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 	if (chanerr) {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"CHANERR = %x, clearing\n", chanerr);
+		dev_err(to_dev(ioat_chan), "CHANERR = %x, clearing\n", chanerr);
 		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 	}
 
@@ -752,7 +747,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 	for (i = 0; i < ioat_initial_desc_count; i++) {
 		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
 		if (!desc) {
-			dev_err(&ioat_chan->device->pdev->dev,
+			dev_err(to_dev(ioat_chan),
 				"Only %d initial descriptors\n", i);
 			break;
 		}
@@ -819,14 +814,14 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 			in_use_descs++;
 			list_del(&desc->node);
 			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-				      desc->async_tx.phys);
+				      desc->txd.phys);
 			kfree(desc);
 		}
 		list_for_each_entry_safe(desc, _desc,
 					 &ioat_chan->free_desc, node) {
 			list_del(&desc->node);
 			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-				      desc->async_tx.phys);
+				      desc->txd.phys);
 			kfree(desc);
 		}
 		break;
@@ -836,12 +831,12 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 					     ioat_chan->free_desc.next, node) {
 			list_del(&desc->node);
 			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-				      desc->async_tx.phys);
+				      desc->txd.phys);
 			kfree(desc);
 		}
 		desc = to_ioat_desc(ioat_chan->free_desc.next);
 		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->async_tx.phys);
+			      desc->txd.phys);
 		kfree(desc);
 		INIT_LIST_HEAD(&ioat_chan->free_desc);
 		INIT_LIST_HEAD(&ioat_chan->used_desc);
@@ -855,8 +850,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 
 	/* one is ok since we left it on there on purpose */
 	if (in_use_descs > 1)
-		dev_err(&ioat_chan->device->pdev->dev,
-			"Freeing %d in use descriptors!\n",
+		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
 			in_use_descs - 1);
 
 	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
@@ -889,8 +883,7 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 		/* try to get another desc */
 		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
 		if (!new) {
-			dev_err(&ioat_chan->device->pdev->dev,
-				"alloc failed\n");
+			dev_err(to_dev(ioat_chan), "alloc failed\n");
 			return NULL;
 		}
 	}
@@ -936,16 +929,15 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 		for (i = 16; i; i--) {
 			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
 			if (!desc) {
-				dev_err(&ioat_chan->device->pdev->dev,
-					"alloc failed\n");
+				dev_err(to_dev(ioat_chan), "alloc failed\n");
 				break;
 			}
 			list_add_tail(&desc->node, ioat_chan->used_desc.next);
 
 			desc->hw->next
-				= to_ioat_desc(desc->node.next)->async_tx.phys;
+				= to_ioat_desc(desc->node.next)->txd.phys;
 			to_ioat_desc(desc->node.prev)->hw->next
-				= desc->async_tx.phys;
+				= desc->txd.phys;
 			ioat_chan->desccount++;
 		}
 
@@ -962,8 +954,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 	return new;
 }
 
-static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
-						struct ioat_dma_chan *ioat_chan)
+static struct ioat_desc_sw *
+ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
 	if (!ioat_chan)
 		return NULL;
@@ -978,12 +970,9 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
 	return NULL;
 }
 
-static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
-						struct dma_chan *chan,
-						dma_addr_t dma_dest,
-						dma_addr_t dma_src,
-						size_t len,
-						unsigned long flags)
+static struct dma_async_tx_descriptor *
+ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+		      dma_addr_t dma_src, size_t len, unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
@@ -996,22 +985,19 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 		new->len = len;
 		new->dst = dma_dest;
 		new->src = dma_src;
-		new->async_tx.flags = flags;
-		return &new->async_tx;
+		new->txd.flags = flags;
+		return &new->txd;
 	} else {
-		dev_err(&ioat_chan->device->pdev->dev,
+		dev_err(to_dev(ioat_chan),
 			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
 			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
 		return NULL;
 	}
 }
 
-static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
-						struct dma_chan *chan,
-						dma_addr_t dma_dest,
-						dma_addr_t dma_src,
-						size_t len,
-						unsigned long flags)
+static struct dma_async_tx_descriptor *
+ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+		      dma_addr_t dma_src, size_t len, unsigned long flags)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *new;
@@ -1028,11 +1014,11 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
 		new->len = len;
 		new->dst = dma_dest;
 		new->src = dma_src;
-		new->async_tx.flags = flags;
-		return &new->async_tx;
+		new->txd.flags = flags;
+		return &new->txd;
 	} else {
 		spin_unlock_bh(&ioat_chan->desc_lock);
-		dev_err(&ioat_chan->device->pdev->dev,
+		dev_err(to_dev(ioat_chan),
 			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
 			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
 		return NULL;
@@ -1050,8 +1036,8 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
 static void
 ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
 {
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
 			pci_unmap_single(ioat_chan->device->pdev,
 					 pci_unmap_addr(desc, dst),
 					 pci_unmap_len(desc, len),
@@ -1063,8 +1049,8 @@ ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
 					 PCI_DMA_FROMDEVICE);
 	}
 
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
 			pci_unmap_single(ioat_chan->device->pdev,
 					 pci_unmap_addr(desc, src),
 					 pci_unmap_len(desc, len),
@@ -1088,6 +1074,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 	dma_cookie_t cookie = 0;
 	unsigned long desc_phys;
 	struct ioat_desc_sw *latest_desc;
+	struct dma_async_tx_descriptor *tx;
 
 	prefetch(ioat_chan->completion_virt);
 
@@ -1111,8 +1098,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 	if ((ioat_chan->completion_virt->full
 		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
 			IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
-		dev_err(&ioat_chan->device->pdev->dev,
-			"Channel halted, chanerr = %x\n",
+		dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n",
 			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
 
 		/* TODO do something to salvage the situation */
@@ -1145,38 +1131,38 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 	case IOAT_VER_1_2:
 		list_for_each_entry_safe(desc, _desc,
 					 &ioat_chan->used_desc, node) {
-
+			tx = &desc->txd;
 			/*
 			 * Incoming DMA requests may use multiple descriptors,
 			 * due to exceeding xfercap, perhaps. If so, only the
 			 * last one will have a cookie, and require unmapping.
 			 */
-			if (desc->async_tx.cookie) {
-				cookie = desc->async_tx.cookie;
+			if (tx->cookie) {
+				cookie = tx->cookie;
 				ioat_dma_unmap(ioat_chan, desc);
-				if (desc->async_tx.callback) {
-					desc->async_tx.callback(desc->async_tx.callback_param);
-					desc->async_tx.callback = NULL;
+				if (tx->callback) {
+					tx->callback(tx->callback_param);
+					tx->callback = NULL;
 				}
 			}
 
-			if (desc->async_tx.phys != phys_complete) {
+			if (tx->phys != phys_complete) {
 				/*
 				 * a completed entry, but not the last, so clean
 				 * up if the client is done with the descriptor
 				 */
-				if (async_tx_test_ack(&desc->async_tx)) {
+				if (async_tx_test_ack(tx)) {
 					list_move_tail(&desc->node,
 						       &ioat_chan->free_desc);
 				} else
-					desc->async_tx.cookie = 0;
+					tx->cookie = 0;
 			} else {
 				/*
 				 * last used desc. Do not remove, so we can
 				 * append from it, but don't look at it next
 				 * time, either
 				 */
-				desc->async_tx.cookie = 0;
+				tx->cookie = 0;
 
 				/* TODO check status bits? */
 				break;
@@ -1191,10 +1177,11 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 
 		/* work backwards to find latest finished desc */
 		desc = to_ioat_desc(ioat_chan->used_desc.next);
+		tx = &desc->txd;
 		latest_desc = NULL;
 		do {
 			desc = to_ioat_desc(desc->node.prev);
-			desc_phys = (unsigned long)desc->async_tx.phys
+			desc_phys = (unsigned long)tx->phys
 				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
 			if (desc_phys == phys_complete) {
 				latest_desc = desc;
@@ -1203,19 +1190,18 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 		} while (&desc->node != ioat_chan->used_desc.prev);
 
 		if (latest_desc != NULL) {
-
 			/* work forwards to clear finished descriptors */
 			for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
 			     &desc->node != latest_desc->node.next &&
 			     &desc->node != ioat_chan->used_desc.next;
 			     desc = to_ioat_desc(desc->node.next)) {
-				if (desc->async_tx.cookie) {
-					cookie = desc->async_tx.cookie;
-					desc->async_tx.cookie = 0;
+				if (tx->cookie) {
+					cookie = tx->cookie;
+					tx->cookie = 0;
 					ioat_dma_unmap(ioat_chan, desc);
-					if (desc->async_tx.callback) {
-						desc->async_tx.callback(desc->async_tx.callback_param);
-						desc->async_tx.callback = NULL;
+					if (tx->callback) {
+						tx->callback(tx->callback_param);
+						tx->callback = NULL;
 					}
 				}
 			}
@@ -1245,10 +1231,9 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
  * @done: if not %NULL, updated with last completed transaction
  * @used: if not %NULL, updated with last used transaction
  */
-static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
-					    dma_cookie_t cookie,
-					    dma_cookie_t *done,
-					    dma_cookie_t *used)
+static enum dma_status
+ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie,
+		     dma_cookie_t *done, dma_cookie_t *used)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	dma_cookie_t last_used;
@@ -1290,7 +1275,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 	desc = ioat_dma_get_next_descriptor(ioat_chan);
 
 	if (!desc) {
-		dev_err(&ioat_chan->device->pdev->dev,
+		dev_err(to_dev(ioat_chan),
 			"Unable to start null desc - get next desc failed\n");
 		spin_unlock_bh(&ioat_chan->desc_lock);
 		return;
@@ -1303,15 +1288,15 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 	desc->hw->size = NULL_DESC_BUFFER_SIZE;
 	desc->hw->src_addr = 0;
 	desc->hw->dst_addr = 0;
-	async_tx_ack(&desc->async_tx);
+	async_tx_ack(&desc->txd);
 	switch (ioat_chan->device->version) {
 	case IOAT_VER_1_2:
 		desc->hw->next = 0;
 		list_add_tail(&desc->node, &ioat_chan->used_desc);
 
-		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
 		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-		writel(((u64) desc->async_tx.phys) >> 32,
+		writel(((u64) desc->txd.phys) >> 32,
 		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
 
 		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
@@ -1319,9 +1304,9 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 		break;
 	case IOAT_VER_2_0:
 	case IOAT_VER_3_0:
-		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+		writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
 		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-		writel(((u64) desc->async_tx.phys) >> 32,
+		writel(((u64) desc->txd.phys) >> 32,
 		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
 
 		ioat_chan->dmacount++;
@@ -1352,6 +1337,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	int i;
 	u8 *src;
 	u8 *dest;
+	struct dma_device *dma = &device->common;
+	struct device *dev = &device->pdev->dev;
 	struct dma_chan *dma_chan;
 	struct dma_async_tx_descriptor *tx;
 	dma_addr_t dma_dest, dma_src;
@@ -1375,26 +1362,21 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 		src[i] = (u8)i;
 
 	/* Start copy, using first DMA channel */
-	dma_chan = container_of(device->common.channels.next,
-				struct dma_chan,
+	dma_chan = container_of(dma->channels.next, struct dma_chan,
 				device_node);
-	if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
-		dev_err(&device->pdev->dev,
-			"selftest cannot allocate chan resource\n");
+	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+		dev_err(dev, "selftest cannot allocate chan resource\n");
 		err = -ENODEV;
 		goto out;
 	}
 
-	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
-				 DMA_TO_DEVICE);
-	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
-				  DMA_FROM_DEVICE);
+	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
 	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
 	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
 						   IOAT_TEST_SIZE, flags);
 	if (!tx) {
-		dev_err(&device->pdev->dev,
-			"Self-test prep failed, disabling\n");
+		dev_err(dev, "Self-test prep failed, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
@@ -1405,32 +1387,29 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	tx->callback_param = &cmp;
 	cookie = tx->tx_submit(tx);
 	if (cookie < 0) {
-		dev_err(&device->pdev->dev,
-			"Self-test setup failed, disabling\n");
+		dev_err(dev, "Self-test setup failed, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
-	device->common.device_issue_pending(dma_chan);
+	dma->device_issue_pending(dma_chan);
 
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
 	if (tmo == 0 ||
-	    device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
 					!= DMA_SUCCESS) {
-		dev_err(&device->pdev->dev,
-			"Self-test copy timed out, disabling\n");
+		dev_err(dev, "Self-test copy timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
 	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
-		dev_err(&device->pdev->dev,
-			"Self-test copy failed compare, disabling\n");
+		dev_err(dev, "Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
 
 free_resources:
-	device->common.device_free_chan_resources(dma_chan);
+	dma->device_free_chan_resources(dma_chan);
 out:
 	kfree(src);
 	kfree(dest);
@@ -1483,15 +1462,14 @@ msix:
 
 	for (i = 0; i < msixcnt; i++) {
 		msix = &device->msix_entries[i];
-		ioat_chan = ioat_lookup_chan_by_index(device, i);
+		ioat_chan = ioat_chan_by_index(device, i);
 		err = devm_request_irq(dev, msix->vector,
 				       ioat_dma_do_interrupt_msix, 0,
 				       "ioat-msix", ioat_chan);
 		if (err) {
 			for (j = 0; j < i; j++) {
 				msix = &device->msix_entries[j];
-				ioat_chan =
-					ioat_lookup_chan_by_index(device, j);
+				ioat_chan = ioat_chan_by_index(device, j);
 				devm_free_irq(dev, msix->vector, ioat_chan);
 			}
 			goto msix_single_vector;
@@ -1561,12 +1539,13 @@ static void ioat_disable_interrupts(struct ioatdma_device *device)
 	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
 }
 
-struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
-				      void __iomem *iobase)
+struct ioatdma_device *
+ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase)
 {
 	int err;
 	struct device *dev = &pdev->dev;
 	struct ioatdma_device *device;
+	struct dma_device *dma;
 
 	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
 	if (!device)
@@ -1574,6 +1553,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	device->pdev = pdev;
 	device->reg_base = iobase;
 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+	dma = &device->common;
 
 	/* DMA coherent memory pool for DMA descriptor allocations */
 	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
@@ -1592,36 +1572,32 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 		goto err_completion_pool;
 	}
 
-	INIT_LIST_HEAD(&device->common.channels);
+	INIT_LIST_HEAD(&dma->channels);
 	ioat_dma_enumerate_channels(device);
 
-	device->common.device_alloc_chan_resources =
-						ioat_dma_alloc_chan_resources;
-	device->common.device_free_chan_resources =
-						ioat_dma_free_chan_resources;
-	device->common.dev = &pdev->dev;
+	dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
+	dma->device_free_chan_resources = ioat_dma_free_chan_resources;
+	dma->dev = &pdev->dev;
 
-	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
-	device->common.device_is_tx_complete = ioat_dma_is_complete;
+	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+	dma->device_is_tx_complete = ioat_dma_is_complete;
 	switch (device->version) {
 	case IOAT_VER_1_2:
-		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-		device->common.device_issue_pending =
-						ioat1_dma_memcpy_issue_pending;
+		dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+		dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
 		break;
 	case IOAT_VER_2_0:
 	case IOAT_VER_3_0:
-		device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
-		device->common.device_issue_pending =
-						ioat2_dma_memcpy_issue_pending;
+		dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
+		dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
 		break;
 	}
 
 	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
 		" %d channels, device version 0x%02x, driver version %s\n",
-		device->common.chancnt, device->version, IOAT_DMA_VERSION);
+		dma->chancnt, device->version, IOAT_DMA_VERSION);
 
-	if (!device->common.chancnt) {
+	if (!dma->chancnt) {
 		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
 			"zero channels detected\n");
 		goto err_setup_interrupts;
@@ -1635,7 +1611,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_self_test;
 
-	err = dma_async_device_register(&device->common);
+	err = dma_async_device_register(dma);
 	if (err)
 		goto err_self_test;
 
@@ -1663,19 +1639,19 @@ void ioat_dma_remove(struct ioatdma_device *device)
 {
 	struct dma_chan *chan, *_chan;
 	struct ioat_dma_chan *ioat_chan;
+	struct dma_device *dma = &device->common;
 
 	if (device->version != IOAT_VER_3_0)
 		cancel_delayed_work(&device->work);
 
 	ioat_disable_interrupts(device);
 
-	dma_async_device_unregister(&device->common);
+	dma_async_device_unregister(dma);
 
 	pci_pool_destroy(device->dma_pool);
 	pci_pool_destroy(device->completion_pool);
 
-	list_for_each_entry_safe(chan, _chan,
-				 &device->common.channels, device_node) {
+	list_for_each_entry_safe(chan, _chan, &dma->channels, device_node) {
 		ioat_chan = to_ioat_chan(chan);
 		list_del(&chan->device_node);
 	}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5e8d7cfabc21..c5eabae4c1b9 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -38,7 +38,8 @@
 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
-#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
+#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
+#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
 
 #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
 
@@ -123,7 +124,7 @@ struct ioat_dma_chan {
  * @node: this descriptor will either be on the free list,
  *     or attached to a transaction list (async_tx.tx_list)
  * @tx_cnt: number of descriptors required to complete the transaction
- * @async_tx: the generic software descriptor for all engines
+ * @txd: the generic software descriptor for all engines
  */
 struct ioat_desc_sw {
 	struct ioat_dma_descriptor *hw;
@@ -132,7 +133,7 @@ struct ioat_desc_sw {
 	size_t len;
 	dma_addr_t src;
 	dma_addr_t dst;
-	struct dma_async_tx_descriptor async_tx;
+	struct dma_async_tx_descriptor txd;
 };
 
 static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
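
For readers skimming the diff, a minimal illustrative sketch of the two conventions this patch converges on: the new to_dev() macro from dma.h and the async_tx -> txd field rename in struct ioat_desc_sw. The function example_chan_report() is hypothetical and not part of the patch; only to_dev(), chan_num(), dev_err() and the txd member come from the diff itself.

/* Illustrative only -- not part of the patch. Post-patch usage pattern,
 * assuming the definitions from drivers/dma/ioat/dma.h shown above.
 */
static void example_chan_report(struct ioat_dma_chan *ioat_chan,
				struct ioat_desc_sw *desc)
{
	/* txd replaces the old async_tx member of struct ioat_desc_sw */
	struct dma_async_tx_descriptor *tx = &desc->txd;

	/* to_dev() expands to &ioat_chan->device->pdev->dev */
	dev_err(to_dev(ioat_chan), "chan%d: cookie %d\n",
		chan_num(ioat_chan), tx->cookie);
}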