Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/ioat_dma.c           | 346
-rw-r--r--  drivers/dma/ioatdma.h            |  12
-rw-r--r--  drivers/dma/ioatdma_registers.h  |   6
3 files changed, 298 insertions, 66 deletions
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index eef83ea291a3..66c5bb53211b 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -47,6 +47,65 @@
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
 
+static struct ioat_dma_chan *ioat_lookup_chan_by_index(struct ioatdma_device *device,
+						       int index)
+{
+	return device->idx[index];
+}
+
+/**
+ * ioat_dma_do_interrupt - handler used for single vector interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ */
+static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+{
+	struct ioatdma_device *instance = data;
+	struct ioat_dma_chan *ioat_chan;
+	unsigned long attnstatus;
+	int bit;
+	u8 intrctrl;
+
+	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
+
+	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
+		return IRQ_NONE;
+
+	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
+		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+		return IRQ_NONE;
+	}
+
+	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
+	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
+		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
+		tasklet_schedule(&ioat_chan->cleanup_task);
+	}
+
+	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+	return IRQ_HANDLED;
+}
+
+/**
+ * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ */
+static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+{
+	struct ioat_dma_chan *ioat_chan = data;
+
+	tasklet_schedule(&ioat_chan->cleanup_task);
+
+	return IRQ_HANDLED;
+}
+
+static void ioat_dma_cleanup_tasklet(unsigned long data);
+
+/**
+ * ioat_dma_enumerate_channels - find and initialize the device's channels
+ * @device: the device to be enumerated
+ */
 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 {
 	u8 xfercap_scale;
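
The pair of handlers added above replaces the old catch-all ioat_do_interrupt() (deleted further down), which only printk'ed the attention status: the hard-IRQ path now just acknowledges the device and defers descriptor cleanup to a per-channel tasklet. A minimal sketch of that defer pattern, with hypothetical demo_* names standing in for the driver's types:

	/* Sketch only: keep the top half short, do the list walk in softirq. */
	static irqreturn_t demo_irq(int irq, void *data)
	{
		struct demo_chan *chan = data;	/* hypothetical channel struct */

		tasklet_schedule(&chan->cleanup_task);
		return IRQ_HANDLED;
	}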
@@ -76,6 +135,11 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 		ioat_chan->common.device = &device->common;
 		list_add_tail(&ioat_chan->common.device_node,
 			      &device->common.channels);
+		device->idx[i] = ioat_chan;
+		tasklet_init(&ioat_chan->cleanup_task,
+			     ioat_dma_cleanup_tasklet,
+			     (unsigned long) ioat_chan);
+		tasklet_disable(&ioat_chan->cleanup_task);
 	}
 	return device->common.chancnt;
 }
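
Enumeration now records each channel in device->idx[] and creates its cleanup tasklet in the disabled state; ioat_dma_alloc_chan_resources() enables it (next hunk) and ioat_dma_free_chan_resources() parks it again before descriptors are torn down. The lifecycle, sketched against a hypothetical chan with the same cleanup_task field:

	/* At enumeration: create the tasklet parked. */
	tasklet_init(&chan->cleanup_task, demo_cleanup, (unsigned long) chan);
	tasklet_disable(&chan->cleanup_task);

	/* In alloc_chan_resources: let it run. */
	tasklet_enable(&chan->cleanup_task);

	/* In free_chan_resources: park it before freeing descriptors. */
	tasklet_disable(&chan->cleanup_task);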
@@ -234,6 +298,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 	writel(((u64) ioat_chan->completion_addr) >> 32,
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
+	tasklet_enable(&ioat_chan->cleanup_task);
 	ioat_dma_start_null_desc(ioat_chan);
 	return i;
 }
@@ -245,9 +310,14 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 	struct ioat_desc_sw *desc, *_desc;
 	int in_use_descs = 0;
 
+	tasklet_disable(&ioat_chan->cleanup_task);
 	ioat_dma_memcpy_cleanup(ioat_chan);
 
+	/* Delay 100ms after reset to allow internal DMA logic to quiesce
+	 * before removing DMA descriptor resources.
+	 */
 	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+	mdelay(100);
 
 	spin_lock_bh(&ioat_chan->desc_lock);
 	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
@@ -276,6 +346,34 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 			in_use_descs - 1);
 
 	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
+	ioat_chan->pending = 0;
+}
+/**
+ * ioat_dma_get_next_descriptor - return the next available descriptor
+ * @ioat_chan: IOAT DMA channel handle
+ *
+ * Gets the next descriptor from the chain, and must be called with the
+ * channel's desc_lock held.  Allocates more descriptors if the channel
+ * has run out.
+ */
+static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
+						struct ioat_dma_chan *ioat_chan)
+{
+	struct ioat_desc_sw *new = NULL;
+
+	if (!list_empty(&ioat_chan->free_desc)) {
+		new = to_ioat_desc(ioat_chan->free_desc.next);
+		list_del(&new->node);
+	} else {
+		/* try to get another desc */
+		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+		/* will this ever happen? */
+		/* TODO add upper limit on these */
+		BUG_ON(!new);
+	}
+
+	prefetch(new->hw);
+	return new;
 }
 
 static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
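
ioat_dma_get_next_descriptor() factors out the recycle-or-allocate logic previously duplicated in ioat_dma_prep_memcpy() and ioat_dma_start_null_desc(). Since callers hold desc_lock, the fallback allocation is GFP_ATOMIC; note this quietly changes the null-descriptor path, which used to drop the lock and allocate with GFP_KERNEL. The expected calling convention, sketched:

	/* Sketch: desc_lock is held across get + fill, as in both callers. */
	spin_lock_bh(&ioat_chan->desc_lock);
	desc = ioat_dma_get_next_descriptor(ioat_chan);
	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;	/* illustrative fill */
	spin_unlock_bh(&ioat_chan->desc_lock);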
@@ -300,17 +398,7 @@ static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
 
 	spin_lock_bh(&ioat_chan->desc_lock);
 	while (len) {
-		if (!list_empty(&ioat_chan->free_desc)) {
-			new = to_ioat_desc(ioat_chan->free_desc.next);
-			list_del(&new->node);
-		} else {
-			/* try to get another desc */
-			new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-			/* will this ever happen? */
-			/* TODO add upper limit on these */
-			BUG_ON(!new);
-		}
-
+		new = ioat_dma_get_next_descriptor(ioat_chan);
 		copy = min((u32) len, ioat_chan->xfercap);
 
 		new->hw->size = copy;
@@ -360,6 +448,14 @@ static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
 	}
 }
 
+static void ioat_dma_cleanup_tasklet(unsigned long data)
+{
+	struct ioat_dma_chan *chan = (void *)data;
+	ioat_dma_memcpy_cleanup(chan);
+	writew(IOAT_CHANCTRL_INT_DISABLE,
+	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 {
 	unsigned long phys_complete;
@@ -397,6 +493,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 		return;
 	}
 
+	cookie = 0;
 	spin_lock_bh(&ioat_chan->desc_lock);
 	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
 
@@ -509,48 +606,13 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
 
 /* PCI API */
 
-static irqreturn_t ioat_do_interrupt(int irq, void *data)
-{
-	struct ioatdma_device *instance = data;
-	unsigned long attnstatus;
-	u8 intrctrl;
-
-	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
-
-	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
-		return IRQ_NONE;
-
-	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
-		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
-		return IRQ_NONE;
-	}
-
-	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
-
-	printk(KERN_ERR "ioatdma: interrupt! status %lx\n", attnstatus);
-
-	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
-	return IRQ_HANDLED;
-}
-
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 {
 	struct ioat_desc_sw *desc;
 
 	spin_lock_bh(&ioat_chan->desc_lock);
 
-	if (!list_empty(&ioat_chan->free_desc)) {
-		desc = to_ioat_desc(ioat_chan->free_desc.next);
-		list_del(&desc->node);
-	} else {
-		/* try to get another desc */
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
-		spin_lock_bh(&ioat_chan->desc_lock);
-		/* will this ever happen? */
-		BUG_ON(!desc);
-	}
-
+	desc = ioat_dma_get_next_descriptor(ioat_chan);
 	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
 	desc->hw->next = 0;
 	desc->async_tx.ack = 1;
@@ -571,7 +633,11 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
  */
 #define IOAT_TEST_SIZE 2000
 
-static int ioat_self_test(struct ioatdma_device *device)
+/**
+ * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
+ * @device: device to be tested
+ */
+static int ioat_dma_self_test(struct ioatdma_device *device)
 {
 	int i;
 	u8 *src;
@@ -639,6 +705,161 @@ out:
 	return err;
 }
 
+static char ioat_interrupt_style[32] = "msix";
+module_param_string(ioat_interrupt_style, ioat_interrupt_style,
+		    sizeof(ioat_interrupt_style), 0644);
+MODULE_PARM_DESC(ioat_interrupt_style,
+		 "set ioat interrupt style: msix (default), "
+		 "msix-single-vector, msi, intx)");
+
+/**
+ * ioat_dma_setup_interrupts - setup interrupt handler
+ * @device: ioat device
+ */
+static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+{
+	struct ioat_dma_chan *ioat_chan;
+	int err, i, j, msixcnt;
+	u8 intrctrl = 0;
+
+	if (!strcmp(ioat_interrupt_style, "msix"))
+		goto msix;
+	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
+		goto msix_single_vector;
+	if (!strcmp(ioat_interrupt_style, "msi"))
+		goto msi;
+	if (!strcmp(ioat_interrupt_style, "intx"))
+		goto intx;
+
+msix:
+	/* The number of MSI-X vectors should equal the number of channels */
+	msixcnt = device->common.chancnt;
+	for (i = 0; i < msixcnt; i++)
+		device->msix_entries[i].entry = i;
+
+	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
+	if (err < 0)
+		goto msi;
+	if (err > 0)
+		goto msix_single_vector;
+
+	for (i = 0; i < msixcnt; i++) {
+		ioat_chan = ioat_lookup_chan_by_index(device, i);
+		err = request_irq(device->msix_entries[i].vector,
+				  ioat_dma_do_interrupt_msix,
+				  0, "ioat-msix", ioat_chan);
+		if (err) {
+			for (j = 0; j < i; j++) {
+				ioat_chan =
+					ioat_lookup_chan_by_index(device, j);
+				free_irq(device->msix_entries[j].vector,
+					 ioat_chan);
+			}
+			goto msix_single_vector;
+		}
+	}
+	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+	device->irq_mode = msix_multi_vector;
+	goto done;
+
+msix_single_vector:
+	device->msix_entries[0].entry = 0;
+	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
+	if (err)
+		goto msi;
+
+	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
+			  0, "ioat-msix", device);
+	if (err) {
+		pci_disable_msix(device->pdev);
+		goto msi;
+	}
+	device->irq_mode = msix_single_vector;
+	goto done;
+
+msi:
+	err = pci_enable_msi(device->pdev);
+	if (err)
+		goto intx;
+
+	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
+			  0, "ioat-msi", device);
+	if (err) {
+		pci_disable_msi(device->pdev);
+		goto intx;
+	}
+	/*
+	 * CB 1.2 devices need a bit set in configuration space to enable MSI
+	 */
+	if (device->version == IOAT_VER_1_2) {
+		u32 dmactrl;
+		pci_read_config_dword(device->pdev,
+				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
+		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
+		pci_write_config_dword(device->pdev,
+				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
+	}
+	device->irq_mode = msi;
+	goto done;
+
+intx:
+	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
+			  IRQF_SHARED, "ioat-intx", device);
+	if (err)
+		goto err_no_irq;
+	device->irq_mode = intx;
+
+done:
+	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
+	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	return 0;
+
+err_no_irq:
+	/* Disable all interrupt generation */
+	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	dev_err(&device->pdev->dev, "no usable interrupts\n");
+	device->irq_mode = none;
+	return -1;
+}
+
+/**
+ * ioat_dma_remove_interrupts - remove whatever interrupts were set
+ * @device: ioat device
+ */
+static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
+{
+	struct ioat_dma_chan *ioat_chan;
+	int i;
+
+	/* Disable all interrupt generation */
+	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+
+	switch (device->irq_mode) {
+	case msix_multi_vector:
+		for (i = 0; i < device->common.chancnt; i++) {
+			ioat_chan = ioat_lookup_chan_by_index(device, i);
+			free_irq(device->msix_entries[i].vector, ioat_chan);
+		}
+		pci_disable_msix(device->pdev);
+		break;
+	case msix_single_vector:
+		free_irq(device->msix_entries[0].vector, device);
+		pci_disable_msix(device->pdev);
+		break;
+	case msi:
+		free_irq(device->pdev->irq, device);
+		pci_disable_msi(device->pdev);
+		break;
+	case intx:
+		free_irq(device->pdev->irq, device);
+		break;
+	case none:
+		dev_warn(&device->pdev->dev,
+			 "call to %s without interrupts setup\n", __func__);
+	}
+	device->irq_mode = none;
+}
+
 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 				      void __iomem *iobase)
 {
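
The module parameter selects the interrupt style, and setup degrades gracefully msix -> msix-single-vector -> msi -> intx as each step fails. The two-way test on pci_enable_msix()'s return value relies on its contract in this era: a negative errno on hard failure (fall back to MSI), a positive count when fewer vectors are available than requested (fall back to a single shared MSI-X vector). For example, forcing plain MSI at load time (illustrative; assumes the driver is built as the ioatdma module):

	modprobe ioatdma ioat_interrupt_style=msi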
@@ -684,21 +905,16 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
 	device->common.device_dependency_added = ioat_dma_dependency_added;
 	device->common.dev = &pdev->dev;
-	printk(KERN_INFO "ioatdma: Intel(R) I/OAT DMA Engine found,"
-	       " %d channels, device version 0x%02x\n",
-	       device->common.chancnt, device->version);
+	dev_err(&device->pdev->dev,
+		"ioatdma: Intel(R) I/OAT DMA Engine found,"
+		" %d channels, device version 0x%02x\n",
+		device->common.chancnt, device->version);
 
-	pci_set_drvdata(pdev, device);
-	err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
-			  device);
+	err = ioat_dma_setup_interrupts(device);
 	if (err)
-		goto err_irq;
-
-	writeb(IOAT_INTRCTRL_MASTER_INT_EN,
-	       device->reg_base + IOAT_INTRCTRL_OFFSET);
-	pci_set_master(pdev);
+		goto err_setup_interrupts;
 
-	err = ioat_self_test(device);
+	err = ioat_dma_self_test(device);
 	if (err)
 		goto err_self_test;
@@ -707,8 +923,8 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	return device;
 
 err_self_test:
-	free_irq(device->pdev->irq, device);
-err_irq:
+	ioat_dma_remove_interrupts(device);
+err_setup_interrupts:
 	pci_pool_destroy(device->completion_pool);
 err_completion_pool:
 	pci_pool_destroy(device->dma_pool);
@@ -716,8 +932,8 @@ err_dma_pool:
 	kfree(device);
 err_kzalloc:
 	iounmap(iobase);
-	printk(KERN_ERR
-	       "ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
+	dev_err(&device->pdev->dev,
+		"ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
 	return NULL;
 }
 
@@ -728,7 +944,7 @@ void ioat_dma_remove(struct ioatdma_device *device)
 
 	dma_async_device_unregister(&device->common);
 
-	free_irq(device->pdev->irq, device);
+	ioat_dma_remove_interrupts(device);
 
 	pci_pool_destroy(device->dma_pool);
 	pci_pool_destroy(device->completion_pool);
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index 0b8ffbde1e61..2abf0b88a973 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -28,6 +28,14 @@
 #include <linux/cache.h>
 #include <linux/pci_ids.h>
 
+enum ioat_interrupt {
+	none = 0,
+	msix_multi_vector = 1,
+	msix_single_vector = 2,
+	msi = 3,
+	intx = 4,
+};
+
 #define IOAT_LOW_COMPLETION_MASK 0xffffffc0
 
 /**
@@ -46,6 +54,9 @@ struct ioatdma_device {
 	struct pci_pool *completion_pool;
 	struct dma_device common;
 	u8 version;
+	enum ioat_interrupt irq_mode;
+	struct msix_entry msix_entries[4];
+	struct ioat_dma_chan *idx[4];
 };
 
 /**
@@ -94,6 +105,7 @@ struct ioat_dma_chan {
 			u32 high;
 		};
 	} *completion_virt;
+	struct tasklet_struct cleanup_task;
 };
 
 /* wrapper around hardware descriptor format + additional software fields */
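
These fields carry the interrupt bookkeeping for the C-file changes above: irq_mode drives teardown in ioat_dma_remove_interrupts(), while msix_entries[] and idx[] map vector and ATTNSTATUS-bit numbers to channels. The hard-coded size of 4 presumably matches the channel count of the CB hardware generation this driver targets. A compacted view of the additions (sketch, existing fields elided):

	enum ioat_interrupt { none, msix_multi_vector, msix_single_vector, msi, intx };

	struct ioatdma_device {
		/* ... existing fields elided in this sketch ... */
		enum ioat_interrupt irq_mode;		/* how IRQs were set up */
		struct msix_entry msix_entries[4];	/* one per possible channel */
		struct ioat_dma_chan *idx[4];		/* channel number -> channel */
	};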
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index a30c7349075a..baaab5ea146a 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -21,6 +21,9 @@
 #ifndef _IOAT_REGISTERS_H_
 #define _IOAT_REGISTERS_H_
 
+#define IOAT_PCI_DMACTRL_OFFSET			0x48
+#define IOAT_PCI_DMACTRL_DMA_EN			0x00000001
+#define IOAT_PCI_DMACTRL_MSI_EN			0x00000002
 
 /* MMIO Device Registers */
 #define IOAT_CHANCNT_OFFSET			0x00	/*  8-bit */
@@ -39,6 +42,7 @@
 #define IOAT_INTRCTRL_MASTER_INT_EN		0x01	/* Master Interrupt Enable */
 #define IOAT_INTRCTRL_INT_STATUS		0x02	/* ATTNSTATUS -or- Channel Int */
 #define IOAT_INTRCTRL_INT			0x04	/* INT_STATUS -and- MASTER_INT_EN */
+#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL	0x08	/* Enable all MSI-X vectors */
 
 #define IOAT_ATTNSTATUS_OFFSET			0x04	/* Each bit is a channel */
 
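
IOAT_INTRCTRL_MSIX_VECTOR_CONTROL (0x08) is set only in full MSI-X mode to enable all vectors, and IOAT_PCI_DMACTRL_MSI_EN is the PCI config-space bit that CB 1.2 parts need before MSI delivery works. How setup composes the INTRCTRL byte, mirroring ioat_dma_setup_interrupts() above (msix_mode is a hypothetical flag):

	u8 intrctrl = IOAT_INTRCTRL_MASTER_INT_EN;

	if (msix_mode)	/* one vector per channel */
		intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);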