Diffstat (limited to 'drivers/infiniband')
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_classes.h       |  9
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_qes.h            |  1
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_qp.c             | 48
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_reqs.c           | 60
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_tools.h          |  1
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_fs.c           |  1
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_iba7220.c      |  2
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_ud.c           |  8
-rw-r--r--   drivers/infiniband/hw/mlx4/mr.c                  |  2
-rw-r--r--   drivers/infiniband/hw/nes/nes.h                  |  1
-rw-r--r--   drivers/infiniband/ulp/ipoib/ipoib_cm.c          | 17
-rw-r--r--   drivers/infiniband/ulp/ipoib/ipoib_main.c        | 19
-rw-r--r--   drivers/infiniband/ulp/ipoib/ipoib_multicast.c   | 10
-rw-r--r--   drivers/infiniband/ulp/iser/iser_verbs.c         |  1
14 files changed, 131 insertions, 49 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0b0618edd645..1ab919f836a8 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -156,6 +156,14 @@ struct ehca_mod_qp_parm {
 
 #define EHCA_MOD_QP_PARM_MAX 4
 
+#define QMAP_IDX_MASK 0xFFFFULL
+
+/* struct for tracking if cqes have been reported to the application */
+struct ehca_qmap_entry {
+	u16 app_wr_id;
+	u16 reported;
+};
+
 struct ehca_qp {
 	union {
 		struct ib_qp ib_qp;
@@ -165,6 +173,7 @@ struct ehca_qp {
 	enum ehca_ext_qp_type ext_type;
 	enum ib_qp_state state;
 	struct ipz_queue ipz_squeue;
+	struct ehca_qmap_entry *sq_map;
 	struct ipz_queue ipz_rqueue;
 	struct h_galpas galpas;
 	u32 qkey;
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h
index 818803057ebf..5d28e3e98a20 100644
--- a/drivers/infiniband/hw/ehca/ehca_qes.h
+++ b/drivers/infiniband/hw/ehca/ehca_qes.h
@@ -213,6 +213,7 @@ struct ehca_wqe {
 #define WC_STATUS_ERROR_BIT 0x80000000
 #define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
 #define WC_STATUS_PURGE_BIT 0x10
+#define WC_SEND_RECEIVE_BIT 0x80
 
 struct ehca_cqe {
 	u64 work_request_id;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index ea13efddf175..b6bcee036734 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -412,6 +412,7 @@ static struct ehca_qp *internal_create_qp(
 	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
 					      ib_device);
 	struct ib_ucontext *context = NULL;
+	u32 nr_qes;
 	u64 h_ret;
 	int is_llqp = 0, has_srq = 0;
 	int qp_type, max_send_sge, max_recv_sge, ret;
@@ -715,6 +716,15 @@ static struct ehca_qp *internal_create_qp(
 				 "and pages ret=%i", ret);
 			goto create_qp_exit2;
 		}
+		nr_qes = my_qp->ipz_squeue.queue_length /
+			 my_qp->ipz_squeue.qe_size;
+		my_qp->sq_map = vmalloc(nr_qes *
+					sizeof(struct ehca_qmap_entry));
+		if (!my_qp->sq_map) {
+			ehca_err(pd->device, "Couldn't allocate squeue "
+				 "map ret=%i", ret);
+			goto create_qp_exit3;
+		}
 	}
 
 	if (HAS_RQ(my_qp)) {
@@ -724,7 +734,7 @@ static struct ehca_qp *internal_create_qp(
 		if (ret) {
 			ehca_err(pd->device, "Couldn't initialize rqueue "
 				 "and pages ret=%i", ret);
-			goto create_qp_exit3;
+			goto create_qp_exit4;
 		}
 	}
 
@@ -770,7 +780,7 @@ static struct ehca_qp *internal_create_qp(
 			if (!my_qp->mod_qp_parm) {
 				ehca_err(pd->device,
 					 "Could not alloc mod_qp_parm");
-				goto create_qp_exit4;
+				goto create_qp_exit5;
 			}
 		}
 	}
@@ -780,7 +790,7 @@ static struct ehca_qp *internal_create_qp(
 		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
 		if (h_ret != H_SUCCESS) {
 			ret = ehca2ib_return_code(h_ret);
-			goto create_qp_exit5;
+			goto create_qp_exit6;
 		}
 	}
 
@@ -789,7 +799,7 @@ static struct ehca_qp *internal_create_qp(
 		if (ret) {
 			ehca_err(pd->device,
 				 "Couldn't assign qp to send_cq ret=%i", ret);
-			goto create_qp_exit5;
+			goto create_qp_exit6;
 		}
 	}
 
@@ -815,22 +825,26 @@ static struct ehca_qp *internal_create_qp(
 		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
 			ehca_err(pd->device, "Copy to udata failed");
 			ret = -EINVAL;
-			goto create_qp_exit6;
+			goto create_qp_exit7;
 		}
 	}
 
 	return my_qp;
 
-create_qp_exit6:
+create_qp_exit7:
 	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
 
-create_qp_exit5:
+create_qp_exit6:
 	kfree(my_qp->mod_qp_parm);
 
-create_qp_exit4:
+create_qp_exit5:
 	if (HAS_RQ(my_qp))
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
+create_qp_exit4:
+	if (HAS_SQ(my_qp))
+		vfree(my_qp->sq_map);
+
 create_qp_exit3:
 	if (HAS_SQ(my_qp))
 		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
@@ -1534,8 +1548,6 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	if (attr_mask & IB_QP_QKEY)
 		my_qp->qkey = attr->qkey;
 
-	my_qp->state = qp_new_state;
-
 modify_qp_exit2:
 	if (squeue_locked) { /* this means: sqe -> rts */
 		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1551,6 +1563,8 @@ modify_qp_exit1:
 int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		   struct ib_udata *udata)
 {
+	int ret = 0;
+
 	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
 					      ib_device);
 	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
@@ -1597,12 +1611,18 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 				 attr->qp_state, my_qp->init_attr.port_num,
 				 ibqp->qp_type);
 			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-			return 0;
+			goto out;
 		}
 		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
 	}
 
-	return internal_modify_qp(ibqp, attr, attr_mask, 0);
+	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
+
+out:
+	if ((ret == 0) && (attr_mask & IB_QP_STATE))
+		my_qp->state = attr->qp_state;
+
+	return ret;
 }
 
 void ehca_recover_sqp(struct ib_qp *sqp)
@@ -1973,8 +1993,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 
 	if (HAS_RQ(my_qp))
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-	if (HAS_SQ(my_qp))
+	if (HAS_SQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
+		vfree(my_qp->sq_map);
+	}
 	kmem_cache_free(qp_cache, my_qp);
 	atomic_dec(&shca->num_qps);
 	return 0;
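
Note: the ehca_qp.c hunks above size the new sq_map with one entry per send-queue slot (queue_length / qe_size) and release it with vfree() both on the error-unwind path and at QP destruction. Below is a minimal userspace sketch of that sizing, not driver code: the queue geometry values are made up, and calloc/free stand in for the kernel's vmalloc/vfree.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Mirrors struct ehca_qmap_entry from ehca_classes.h above. */
struct qmap_entry {
	uint16_t app_wr_id;
	uint16_t reported;
};

int main(void)
{
	/* Hypothetical queue geometry; the driver reads both values from ipz_squeue. */
	size_t queue_length = 64 * 1024;	/* bytes in the send queue */
	size_t qe_size = 128;			/* bytes per WQE slot */

	size_t nr_qes = queue_length / qe_size;	/* one map entry per WQE slot */
	struct qmap_entry *sq_map = calloc(nr_qes, sizeof(*sq_map));

	if (!sq_map)
		return 1;

	printf("tracking %zu send WQEs, map size %zu bytes\n",
	       nr_qes, nr_qes * sizeof(*sq_map));
	free(sq_map);	/* the driver uses vfree(my_qp->sq_map) */
	return 0;
}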
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 898c8b5c38dd..4426d82fe798 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -139,6 +139,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
 static inline int ehca_write_swqe(struct ehca_qp *qp,
 				  struct ehca_wqe *wqe_p,
 				  const struct ib_send_wr *send_wr,
+				  u32 sq_map_idx,
 				  int hidden)
 {
 	u32 idx;
@@ -157,7 +158,11 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 	/* clear wqe header until sglist */
 	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
 
-	wqe_p->work_request_id = send_wr->wr_id;
+	wqe_p->work_request_id = send_wr->wr_id & ~QMAP_IDX_MASK;
+	wqe_p->work_request_id |= sq_map_idx & QMAP_IDX_MASK;
+
+	qp->sq_map[sq_map_idx].app_wr_id = send_wr->wr_id & QMAP_IDX_MASK;
+	qp->sq_map[sq_map_idx].reported = 0;
 
 	switch (send_wr->opcode) {
 	case IB_WR_SEND:
@@ -381,6 +386,7 @@ static inline int post_one_send(struct ehca_qp *my_qp,
 {
 	struct ehca_wqe *wqe_p;
 	int ret;
+	u32 sq_map_idx;
 	u64 start_offset = my_qp->ipz_squeue.current_q_offset;
 
 	/* get pointer next to free WQE */
@@ -393,8 +399,15 @@ static inline int post_one_send(struct ehca_qp *my_qp,
 			 "qp_num=%x", my_qp->ib_qp.qp_num);
 		return -ENOMEM;
 	}
+
+	/*
+	 * Get the index of the WQE in the send queue. The same index is used
+	 * for writing into the sq_map.
+	 */
+	sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
+
 	/* write a SEND WQE into the QUEUE */
-	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
+	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
 	/*
 	 * if something failed,
 	 * reset the free entry pointer to the start value
@@ -589,7 +602,7 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
 	struct ehca_qp *my_qp;
 	int cqe_count = 0, is_error;
 
-poll_cq_one_read_cqe:
+repoll:
 	cqe = (struct ehca_cqe *)
 		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
 	if (!cqe) {
@@ -617,7 +630,7 @@ poll_cq_one_read_cqe:
 			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
 				 my_cq->cq_number, cqe->local_qp_number);
 			/* ignore this purged cqe */
-			goto poll_cq_one_read_cqe;
+			goto repoll;
 		}
 		spin_lock_irqsave(&qp->spinlock_s, flags);
 		purgeflag = qp->sqerr_purgeflag;
@@ -636,7 +649,7 @@ poll_cq_one_read_cqe:
 			 * that caused sqe and turn off purge flag
 			 */
 			qp->sqerr_purgeflag = 0;
-			goto poll_cq_one_read_cqe;
+			goto repoll;
 		}
 	}
 
@@ -654,8 +667,34 @@ poll_cq_one_read_cqe:
 			  my_cq, my_cq->cq_number);
 	}
 
-	/* we got a completion! */
-	wc->wr_id = cqe->work_request_id;
+	read_lock(&ehca_qp_idr_lock);
+	my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
+	read_unlock(&ehca_qp_idr_lock);
+	if (!my_qp)
+		goto repoll;
+	wc->qp = &my_qp->ib_qp;
+
+	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT)) {
+		struct ehca_qmap_entry *qmap_entry;
+		/*
+		 * We got a send completion and need to restore the original
+		 * wr_id.
+		 */
+		qmap_entry = &my_qp->sq_map[cqe->work_request_id &
+					    QMAP_IDX_MASK];
+
+		if (qmap_entry->reported) {
+			ehca_warn(cq->device, "Double cqe on qp_num=%#x",
+				  my_qp->real_qp_num);
+			/* found a double cqe, discard it and read next one */
+			goto repoll;
+		}
+		wc->wr_id = cqe->work_request_id & ~QMAP_IDX_MASK;
+		wc->wr_id |= qmap_entry->app_wr_id;
+		qmap_entry->reported = 1;
+	} else
+		/* We got a receive completion. */
+		wc->wr_id = cqe->work_request_id;
 
 	/* eval ib_wc_opcode */
 	wc->opcode = ib_wc_opcode[cqe->optype]-1;
@@ -667,7 +706,7 @@ poll_cq_one_read_cqe:
 		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
 			 my_cq, my_cq->cq_number);
 		/* update also queue adder to throw away this entry!!! */
-		goto poll_cq_one_exit0;
+		goto repoll;
 	}
 
 	/* eval ib_wc_status */
@@ -678,11 +717,6 @@ poll_cq_one_read_cqe:
 	} else
 		wc->status = IB_WC_SUCCESS;
 
-	read_lock(&ehca_qp_idr_lock);
-	my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
-	wc->qp = &my_qp->ib_qp;
-	read_unlock(&ehca_qp_idr_lock);
-
 	wc->byte_len = cqe->nr_bytes_transferred;
 	wc->pkey_index = cqe->pkey_index;
 	wc->slid = cqe->rlid;
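
Note: the ehca_reqs.c hunks above encode the send-queue slot index in the low 16 bits of the wr_id handed to the hardware, save the application's low bits in sq_map, and reverse the substitution at poll time while using the reported flag to drop duplicate CQEs. A standalone sketch of just the pack/unpack arithmetic follows; the helper names and values are illustrative only and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define QMAP_IDX_MASK 0xFFFFULL

/* Replace the low 16 bits of the caller's wr_id with the send-queue slot index. */
static uint64_t pack_wr_id(uint64_t app_wr_id, uint32_t sq_map_idx)
{
	return (app_wr_id & ~QMAP_IDX_MASK) | (sq_map_idx & QMAP_IDX_MASK);
}

/* Rebuild the caller's wr_id from the CQE value and the saved low bits. */
static uint64_t unpack_wr_id(uint64_t cqe_wr_id, uint16_t saved_app_bits)
{
	return (cqe_wr_id & ~QMAP_IDX_MASK) | saved_app_bits;
}

int main(void)
{
	uint64_t app_wr_id = 0x1234567890abcdefULL;	/* made-up caller wr_id */
	uint32_t slot = 42;				/* made-up send-queue slot */
	uint16_t saved = app_wr_id & QMAP_IDX_MASK;	/* sq_map[slot].app_wr_id */

	uint64_t wire = pack_wr_id(app_wr_id, slot);
	printf("on the wire: 0x%llx\n", (unsigned long long)wire);
	printf("restored:    0x%llx\n", (unsigned long long)unpack_wr_id(wire, saved));
	return 0;
}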
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index ec950bf8c479..21f7d06f14ad 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -54,7 +54,6 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
-#include <linux/version.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/device.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 23faba9d21eb..8bb5170b4e41 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
index d90f5e9a54fa..9839e20119bc 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -1720,7 +1720,7 @@ static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
 			  "not 2KB aligned!\n", pa);
 		return;
 	}
-	if (pa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
+	if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
 		ipath_dev_err(dd,
 			      "BUG: Physical page address 0x%lx "
 			      "larger than supported\n", pa);
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 36aa242c487c..729446f56aab 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -267,6 +267,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
 	u16 lrh0;
 	u16 lid;
 	int ret = 0;
+	int next_cur;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 
@@ -290,8 +291,9 @@ int ipath_make_ud_req(struct ipath_qp *qp)
 		goto bail;
 
 	wqe = get_swqe_ptr(qp, qp->s_cur);
-	if (++qp->s_cur >= qp->s_size)
-		qp->s_cur = 0;
+	next_cur = qp->s_cur + 1;
+	if (next_cur >= qp->s_size)
+		next_cur = 0;
 
 	/* Construct the header. */
 	ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
@@ -315,6 +317,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
 			qp->s_flags |= IPATH_S_WAIT_DMA;
 			goto bail;
 		}
+		qp->s_cur = next_cur;
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 		ipath_ud_loopback(qp, wqe);
 		spin_lock_irqsave(&qp->s_lock, flags);
@@ -323,6 +326,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
 		}
 	}
 
+	qp->s_cur = next_cur;
 	extra_bytes = -wqe->length & 3;
 	nwords = (wqe->length + extra_bytes) >> 2;
 
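
Note: the ipath_ud.c hunks above stop advancing qp->s_cur before the request is known to go ahead. The successor index is computed into next_cur and only committed once the WQE is really consumed, so the early bail-out paths leave the ring pointer untouched. A small userspace sketch of that pattern, with the ring size and indices made up for illustration:

#include <stdio.h>

struct fake_qp {
	unsigned s_cur;		/* next send WQE to process */
	unsigned s_size;	/* ring size */
};

/* Compute the successor index first; commit it only on the success path. */
static int make_req(struct fake_qp *qp, int must_bail)
{
	unsigned next_cur = qp->s_cur + 1;

	if (next_cur >= qp->s_size)
		next_cur = 0;

	if (must_bail)
		return -1;	/* s_cur still points at the unconsumed WQE */

	qp->s_cur = next_cur;
	return 0;
}

int main(void)
{
	struct fake_qp qp = { .s_cur = 3, .s_size = 4 };

	make_req(&qp, 1);
	printf("after bail:   s_cur=%u\n", qp.s_cur);	/* still 3 */
	make_req(&qp, 0);
	printf("after commit: s_cur=%u\n", qp.s_cur);	/* wrapped to 0 */
	return 0;
}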
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index a4cdb465cd1d..87f5c5a87b98 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -204,6 +204,8 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 	if (err)
 		goto err_mr;
 
+	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+
 	return &mr->ibmr;
 
 err_mr:
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 39bd897b40c6..8eb7ae96974d 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -43,7 +43,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
-#include <linux/version.h>
 #include <asm/io.h>
 #include <linux/crc32c.h>
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7ebc400a4b3d..341ffedafed6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -202,7 +202,7 @@ static void ipoib_cm_free_rx_ring(struct net_device *dev,
 		dev_kfree_skb_any(rx_ring[i].skb);
 	}
 
-	kfree(rx_ring);
+	vfree(rx_ring);
 }
 
 static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
@@ -352,9 +352,14 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
 	int ret;
 	int i;
 
-	rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
-	if (!rx->rx_ring)
+	rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+	if (!rx->rx_ring) {
+		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
+		       priv->ca->name, ipoib_recvq_size);
 		return -ENOMEM;
+	}
+
+	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
 
 	t = kmalloc(sizeof *t, GFP_KERNEL);
 	if (!t) {
@@ -1494,14 +1499,16 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
 		return;
 	}
 
-	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
-				    GFP_KERNEL);
+	priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 	if (!priv->cm.srq_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
 		ib_destroy_srq(priv->cm.srq);
 		priv->cm.srq = NULL;
+		return;
 	}
+
+	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 }
 
 int ipoib_cm_dev_init(struct net_device *dev)
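
Note: the ipoib_cm.c hunks above move the CM receive rings from kcalloc/kzalloc to vmalloc, since ipoib_recvq_size entries can add up to more physically contiguous memory than kmalloc reliably provides; vmalloc memory is not zeroed, hence the explicit memset, and it must be freed with vfree (first hunk). A userspace approximation of the pattern, with malloc/free standing in for vmalloc/vfree and all sizes invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Rough stand-in for the ring entry type; the real layout lives in ipoib.h. */
struct rx_entry {
	void *skb;
	unsigned long long mapping[2];
};

int main(void)
{
	size_t recvq_size = 4096;	/* hypothetical ipoib_recvq_size */
	struct rx_entry *ring;

	ring = malloc(recvq_size * sizeof(*ring));	/* vmalloc(...) in the driver */
	if (!ring) {
		fprintf(stderr, "failed to allocate CM ring (%zu entries)\n",
			recvq_size);
		return 1;
	}
	memset(ring, 0, recvq_size * sizeof(*ring));	/* vmalloc does not zero */

	printf("ring occupies %zu bytes\n", recvq_size * sizeof(*ring));
	free(ring);					/* vfree(...) in the driver */
	return 0;
}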
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index f51201b17bfd..7e9e218738fa 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -156,14 +156,8 @@ static int ipoib_stop(struct net_device *dev)
 
 	netif_stop_queue(dev);
 
-	/*
-	 * Now flush workqueue to make sure a scheduled task doesn't
-	 * bring our internal state back up.
-	 */
-	flush_workqueue(ipoib_workqueue);
-
-	ipoib_ib_dev_down(dev, 1);
-	ipoib_ib_dev_stop(dev, 1);
+	ipoib_ib_dev_down(dev, 0);
+	ipoib_ib_dev_stop(dev, 0);
 
 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 		struct ipoib_dev_priv *cpriv;
@@ -1314,7 +1308,7 @@ sysfs_failed:
 
 register_failed:
 	ib_unregister_event_handler(&priv->event_handler);
-	flush_scheduled_work();
+	flush_workqueue(ipoib_workqueue);
 
 event_failed:
 	ipoib_dev_cleanup(priv->dev);
@@ -1373,7 +1367,12 @@ static void ipoib_remove_one(struct ib_device *device)
 
 	list_for_each_entry_safe(priv, tmp, dev_list, list) {
 		ib_unregister_event_handler(&priv->event_handler);
-		flush_scheduled_work();
+
+		rtnl_lock();
+		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
+		rtnl_unlock();
+
+		flush_workqueue(ipoib_workqueue);
 
 		unregister_netdev(priv->dev);
 		ipoib_dev_cleanup(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 8950e9546f4e..ac33c8f3ea85 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -392,8 +392,16 @@ static int ipoib_mcast_join_complete(int status,
 					   &priv->mcast_task, 0);
 		mutex_unlock(&mcast_mutex);
 
-		if (mcast == priv->broadcast)
+		if (mcast == priv->broadcast) {
+			/*
+			 * Take RTNL lock here to avoid racing with
+			 * ipoib_stop() and turning the carrier back
+			 * on while a device is being removed.
+			 */
+			rtnl_lock();
 			netif_carrier_on(dev);
+			rtnl_unlock();
+		}
 
 		return 0;
 	}
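
Note: the ipoib_multicast.c hunk above (together with the ipoib_main.c removal-path change) serializes the carrier-on done after a broadcast join against device shutdown by taking RTNL around netif_carrier_on(). A toy userspace model of that serialization follows; the pthread mutex stands in for RTNL, and the removing flag is an invention of the sketch to make the outcome observable, not something the driver checks.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for rtnl_lock() */
static int carrier_ok;
static int removing;

/* Broadcast join completed: only turn the carrier on under the lock. */
static void mcast_join_complete(void)
{
	pthread_mutex_lock(&rtnl);
	if (!removing)
		carrier_ok = 1;		/* netif_carrier_on(dev) */
	pthread_mutex_unlock(&rtnl);
}

/* Device removal: runs with the same lock held, so it cannot interleave. */
static void remove_device(void)
{
	pthread_mutex_lock(&rtnl);
	removing = 1;
	carrier_ok = 0;
	pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	remove_device();
	mcast_join_complete();		/* serialized; carrier stays off */
	printf("carrier_ok=%d\n", carrier_ok);
	return 0;
}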
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 63462ecca147..26ff6214a81f 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -33,7 +33,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
-#include <linux/version.h>
 
 #include "iscsi_iser.h"
 