Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_eq.c')
-rw-r--r-- | drivers/infiniband/hw/mthca/mthca_eq.c | 43 | ++++++++++---------------------------------
1 files changed, 10 insertions, 33 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 76785c653c13..7c9d35f39d75 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -474,7 +474,6 @@ static int mthca_create_eq(struct mthca_dev *dev,
         struct mthca_eq_context *eq_context;
         int err = -ENOMEM;
         int i;
-        u8 status;
 
         eq->dev = dev;
         eq->nent = roundup_pow_of_two(max(nent, 2));
@@ -543,15 +542,9 @@ static int mthca_create_eq(struct mthca_dev *dev,
         eq_context->intr = intr;
         eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);
 
-        err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
+        err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn);
         if (err) {
-                mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
-                goto err_out_free_mr;
-        }
-        if (status) {
-                mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
-                           status);
-                err = -EINVAL;
+                mthca_warn(dev, "SW2HW_EQ returned %d\n", err);
                 goto err_out_free_mr;
         }
 
@@ -597,7 +590,6 @@ static void mthca_free_eq(struct mthca_dev *dev,
 {
         struct mthca_mailbox *mailbox;
         int err;
-        u8 status;
         int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
                 PAGE_SIZE;
         int i;
@@ -606,11 +598,9 @@ static void mthca_free_eq(struct mthca_dev *dev,
         if (IS_ERR(mailbox))
                 return;
 
-        err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
+        err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn);
         if (err)
-                mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
-        if (status)
-                mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);
+                mthca_warn(dev, "HW2SW_EQ returned %d\n", err);
 
         dev->eq_table.arm_mask &= ~eq->eqn_mask;
 
@@ -738,7 +728,6 @@ static void mthca_unmap_eq_regs(struct mthca_dev *dev)
 int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 {
         int ret;
-        u8 status;
 
         /*
          * We assume that mapping one page is enough for the whole EQ
@@ -757,9 +746,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
                 return -ENOMEM;
         }
 
-        ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
-        if (!ret && status)
-                ret = -EINVAL;
+        ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
         if (ret) {
                 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                                PCI_DMA_BIDIRECTIONAL);
@@ -771,9 +758,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 
 void mthca_unmap_eq_icm(struct mthca_dev *dev)
 {
-        u8 status;
-
-        mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
+        mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
         pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                        PCI_DMA_BIDIRECTIONAL);
         __free_page(dev->eq_table.icm_page);
@@ -782,7 +767,6 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev)
 int mthca_init_eq_table(struct mthca_dev *dev)
 {
         int err;
-        u8 status;
         u8 intr;
         int i;
 
@@ -864,22 +848,16 @@ int mthca_init_eq_table(struct mthca_dev *dev)
         }
 
         err = mthca_MAP_EQ(dev, async_mask(dev),
-                           0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+                           0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
         if (err)
                 mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                            dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
-        if (status)
-                mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
-                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);
 
         err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
-                           0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+                           0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
         if (err)
                 mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
                            dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
-        if (status)
-                mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
-                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);
 
         for (i = 0; i < MTHCA_NUM_EQ; ++i)
                 if (mthca_is_memfree(dev))
@@ -909,15 +887,14 @@ err_out_free:
 
 void mthca_cleanup_eq_table(struct mthca_dev *dev)
 {
-        u8 status;
         int i;
 
         mthca_free_irqs(dev);
 
         mthca_MAP_EQ(dev, async_mask(dev),
-                     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+                     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
         mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
-                     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+                     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
 
         for (i = 0; i < MTHCA_NUM_EQ; ++i)
                 mthca_free_eq(dev, &dev->eq_table.eq[i]);
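
Every hunk above follows the same pattern: the mthca firmware-command wrappers (mthca_SW2HW_EQ, mthca_HW2SW_EQ, mthca_MAP_ICM_page, mthca_UNMAP_ICM, mthca_MAP_EQ) no longer return a separate firmware status byte through a u8 *status out-parameter, so callers make a single error check on the return value. As a rough illustration only, the sketch below is a self-contained user-space C program showing how a command layer can fold a nonzero status byte into an ordinary negative errno; fake_fw_exec(), fake_status_to_errno() and fake_SW2HW_EQ() are hypothetical stand-ins, not mthca code, and the exact conversion done inside the real driver is not shown in this diff.

#include <errno.h>
#include <stdio.h>

/* Pretend firmware mailbox execution: returns a raw status byte (0 = OK). */
static unsigned char fake_fw_exec(int eqn)
{
        return eqn < 0 ? 0x03 : 0x00;   /* 0x03 stands in for "bad parameter" */
}

/* Fold a nonzero firmware status into an ordinary negative errno. */
static int fake_status_to_errno(unsigned char status)
{
        switch (status) {
        case 0x00:
                return 0;
        case 0x03:
                return -EINVAL;
        default:
                return -EIO;
        }
}

/* New-style command wrapper: one return value, no status out-parameter. */
static int fake_SW2HW_EQ(int eqn)
{
        return fake_status_to_errno(fake_fw_exec(eqn));
}

int main(void)
{
        /* Caller side after the change: a single error check, as in the diff. */
        int err = fake_SW2HW_EQ(-1);

        if (err)
                fprintf(stderr, "SW2HW_EQ returned %d\n", err);
        return 0;
}

Compiled and run as-is, the sketch prints "SW2HW_EQ returned -22" (-EINVAL), mirroring the single-check style the EQ code switches to in this patch.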