Diffstat (limited to 'drivers/infiniband/hw/cxgb3')
 drivers/infiniband/hw/cxgb3/cxio_hal.c      | 103
 drivers/infiniband/hw/cxgb3/cxio_hal.h      |  12
 drivers/infiniband/hw/cxgb3/cxio_resource.c |  36
 drivers/infiniband/hw/cxgb3/iwch_cm.c       |   6
 drivers/infiniband/hw/cxgb3/iwch_mem.c      |  75
 drivers/infiniband/hw/cxgb3/iwch_provider.c |  68
 drivers/infiniband/hw/cxgb3/iwch_provider.h |   8
 drivers/infiniband/hw/cxgb3/iwch_qp.c       |  13
 8 files changed, 206 insertions(+), 115 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index ed2ee4ba4b7..ebf9d3043f8 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -359,9 +359,10 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
         cq->sw_wptr++;
 }
 
-void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
+int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
         u32 ptr;
+        int flushed = 0;
 
         PDBG("%s wq %p cq %p\n", __func__, wq, cq);
 
@@ -369,8 +370,11 @@ void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
         PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
             wq->rq_rptr, wq->rq_wptr, count);
         ptr = wq->rq_rptr + count;
-        while (ptr++ != wq->rq_wptr)
+        while (ptr++ != wq->rq_wptr) {
                 insert_recv_cqe(wq, cq);
+                flushed++;
+        }
+        return flushed;
 }
 
 static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
@@ -394,9 +398,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
         cq->sw_wptr++;
 }
 
-void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
+int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
         __u32 ptr;
+        int flushed = 0;
         struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
 
         ptr = wq->sq_rptr + count;
@@ -405,7 +410,9 @@ void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
                 insert_sq_cqe(wq, cq, sqp);
                 sqp++;
                 ptr++;
+                flushed++;
         }
+        return flushed;
 }
 
 /*
@@ -581,7 +588,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
  * caller aquires the ctrl_qp lock before the call
  */
 static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
-                                      u32 len, void *data, int completion)
+                                      u32 len, void *data)
 {
         u32 i, nr_wqe, copy_len;
         u8 *copy_data;
@@ -617,7 +624,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
                 flag = 0;
                 if (i == (nr_wqe - 1)) {
                         /* last WQE */
-                        flag = completion ? T3_COMPLETION_FLAG : 0;
+                        flag = T3_COMPLETION_FLAG;
                         if (len % 32)
                                 utx_len = len / 32 + 1;
                         else
@@ -676,21 +683,20 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
         return 0;
 }
 
-/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl, and pbl_size
- * OUT: stag index, actual pbl_size, pbl_addr allocated.
+/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
+ * OUT: stag index
  * TBD: shared memory region support
  */
 static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
                          u32 *stag, u8 stag_state, u32 pdid,
                          enum tpt_mem_type type, enum tpt_mem_perm perm,
-                         u32 zbva, u64 to, u32 len, u8 page_size, __be64 *pbl,
-                         u32 *pbl_size, u32 *pbl_addr)
+                         u32 zbva, u64 to, u32 len, u8 page_size,
+                         u32 pbl_size, u32 pbl_addr)
 {
         int err;
         struct tpt_entry tpt;
         u32 stag_idx;
         u32 wptr;
-        int rereg = (*stag != T3_STAG_UNSET);
 
         stag_state = stag_state > 0;
         stag_idx = (*stag) >> 8;
@@ -704,30 +710,8 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
         PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
              __func__, stag_state, type, pdid, stag_idx);
 
-        if (reset_tpt_entry)
-                cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
-        else if (!rereg) {
-                *pbl_addr = cxio_hal_pblpool_alloc(rdev_p, *pbl_size << 3);
-                if (!*pbl_addr) {
-                        return -ENOMEM;
-                }
-        }
-
         mutex_lock(&rdev_p->ctrl_qp.lock);
 
-        /* write PBL first if any - update pbl only if pbl list exist */
-        if (pbl) {
-
-                PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
-                     __func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
-                     *pbl_size);
-                err = cxio_hal_ctrl_qp_write_mem(rdev_p,
-                                                 (*pbl_addr >> 5),
-                                                 (*pbl_size << 3), pbl, 0);
-                if (err)
-                        goto ret;
-        }
-
         /* write TPT entry */
         if (reset_tpt_entry)
                 memset(&tpt, 0, sizeof(tpt));
@@ -742,23 +726,23 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
                         V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
                         V_TPT_PAGE_SIZE(page_size));
                 tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
-                    cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, *pbl_addr)>>3));
+                    cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
                 tpt.len = cpu_to_be32(len);
                 tpt.va_hi = cpu_to_be32((u32) (to >> 32));
                 tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
                 tpt.rsvd_bind_cnt_or_pstag = 0;
                 tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
-                    cpu_to_be32(V_TPT_PBL_SIZE((*pbl_size) >> 2));
+                    cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
         }
         err = cxio_hal_ctrl_qp_write_mem(rdev_p,
                                          stag_idx +
                                          (rdev_p->rnic_info.tpt_base >> 5),
-                                         sizeof(tpt), &tpt, 1);
+                                         sizeof(tpt), &tpt);
 
         /* release the stag index to free pool */
         if (reset_tpt_entry)
                 cxio_hal_put_stag(rdev_p->rscp, stag_idx);
-ret:
+
         wptr = rdev_p->ctrl_qp.wptr;
         mutex_unlock(&rdev_p->ctrl_qp.lock);
         if (!err)
@@ -769,44 +753,67 @@ ret:
         return err;
 }
 
+int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
+                   u32 pbl_addr, u32 pbl_size)
+{
+        u32 wptr;
+        int err;
+
+        PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+             __func__, pbl_addr, rdev_p->rnic_info.pbl_base,
+             pbl_size);
+
+        mutex_lock(&rdev_p->ctrl_qp.lock);
+        err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
+                                         pbl);
+        wptr = rdev_p->ctrl_qp.wptr;
+        mutex_unlock(&rdev_p->ctrl_qp.lock);
+        if (err)
+                return err;
+
+        if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
+                                     SEQ32_GE(rdev_p->ctrl_qp.rptr,
+                                              wptr)))
+                return -ERESTARTSYS;
+
+        return 0;
+}
+
 int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
                            enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
-                           u8 page_size, __be64 *pbl, u32 *pbl_size,
-                           u32 *pbl_addr)
+                           u8 page_size, u32 pbl_size, u32 pbl_addr)
 {
         *stag = T3_STAG_UNSET;
         return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
-                             zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
+                             zbva, to, len, page_size, pbl_size, pbl_addr);
 }
 
 int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
                              enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
-                             u8 page_size, __be64 *pbl, u32 *pbl_size,
-                             u32 *pbl_addr)
+                             u8 page_size, u32 pbl_size, u32 pbl_addr)
 {
         return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
-                             zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
+                             zbva, to, len, page_size, pbl_size, pbl_addr);
 }
 
 int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
                    u32 pbl_addr)
 {
-        return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
-                             &pbl_size, &pbl_addr);
+        return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
+                             pbl_size, pbl_addr);
 }
 
 int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
 {
-        u32 pbl_size = 0;
         *stag = T3_STAG_UNSET;
         return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
-                             NULL, &pbl_size, NULL);
+                             0, 0);
 }
 
 int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
 {
-        return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
-                             NULL, NULL);
+        return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
+                             0, 0);
 }
 
 int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
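
Note: __cxio_tpt_op() no longer allocates PBL space or writes the page list itself; a registration now writes the PBL through the new cxio_write_pbl() first and only then writes the TPT entry. A rough sketch of the resulting call order for a physical registration, using only functions from this patch (kernel context assumed, error handling trimmed; the real flow is in iwch_mem.c and iwch_provider.c below):

        u32 stag, pbl_addr;
        int err;

        /* 1. reserve space in the adapter's PBL pool (size in bytes) */
        pbl_addr = cxio_hal_pblpool_alloc(rdev_p, pbl_size << 3);

        /* 2. write the page list through the control QP */
        err = cxio_write_pbl(rdev_p, pbl, pbl_addr, pbl_size);

        /* 3. only then write the TPT entry that references the PBL */
        if (!err)
                err = cxio_register_phys_mem(rdev_p, &stag, pdid, perm, zbva,
                                             to, len, page_size,
                                             pbl_size, pbl_addr);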
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 2bcff7f5046..6e128f6bab0 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -154,14 +154,14 @@ int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
 int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
                     struct cxio_ucontext *uctx);
 int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
+int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
+                   u32 pbl_addr, u32 pbl_size);
 int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
                            enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
-                           u8 page_size, __be64 *pbl, u32 *pbl_size,
-                           u32 *pbl_addr);
+                           u8 page_size, u32 pbl_size, u32 pbl_addr);
 int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
                              enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
-                             u8 page_size, __be64 *pbl, u32 *pbl_size,
-                             u32 *pbl_addr);
+                             u8 page_size, u32 pbl_size, u32 pbl_addr);
 int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
                    u32 pbl_addr);
 int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
@@ -173,8 +173,8 @@ u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
 void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
 int __init cxio_hal_init(void);
 void __exit cxio_hal_exit(void);
-void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
-void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
+int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
+int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
 void cxio_flush_hw_cq(struct t3_cq *cq);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 45ed4f25ef7..bd233c08765 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -250,7 +250,6 @@ void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
  */
 
 #define MIN_PBL_SHIFT 8                 /* 256B == min PBL size (32 entries) */
-#define PBL_CHUNK 2*1024*1024
 
 u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
@@ -267,14 +266,35 @@ void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 
 int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
 {
-        unsigned long i;
+        unsigned pbl_start, pbl_chunk;
+
         rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
-        if (rdev_p->pbl_pool)
-                for (i = rdev_p->rnic_info.pbl_base;
-                     i <= rdev_p->rnic_info.pbl_top - PBL_CHUNK + 1;
-                     i += PBL_CHUNK)
-                        gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1);
-        return rdev_p->pbl_pool ? 0 : -ENOMEM;
+        if (!rdev_p->pbl_pool)
+                return -ENOMEM;
+
+        pbl_start = rdev_p->rnic_info.pbl_base;
+        pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
+
+        while (pbl_start < rdev_p->rnic_info.pbl_top) {
+                pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
+                                pbl_chunk);
+                if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
+                        PDBG("%s failed to add PBL chunk (%x/%x)\n",
+                             __func__, pbl_start, pbl_chunk);
+                        if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
+                                printk(KERN_WARNING MOD "%s: Failed to add all PBL chunks (%x/%x)\n",
+                                       __func__, pbl_start,
+                                       rdev_p->rnic_info.pbl_top - pbl_start);
+                                return 0;
+                        }
+                        pbl_chunk >>= 1;
+                } else {
+                        PDBG("%s added PBL chunk (%x/%x)\n",
+                             __func__, pbl_start, pbl_chunk);
+                        pbl_start += pbl_chunk;
+                }
+        }
+
+        return 0;
 }
 
 void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
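
The loop above replaces the fixed 2 MB PBL_CHUNK: it first offers the gen_pool the whole PBL range in one piece, then halves the chunk size on every gen_pool_add() failure, and only gives up once chunks shrink to 1024 << MIN_PBL_SHIFT (256 KB). A self-contained user-space demo of the same back-off arithmetic; add_chunk() is a hypothetical stand-in for gen_pool_add() that rejects anything larger than 1 MB:

#include <stdio.h>

#define MIN_PBL_SHIFT 8

/* stand-in for gen_pool_add(): nonzero means the add failed */
static int add_chunk(unsigned int start, unsigned int len)
{
        (void) start;
        return len > (1 << 20);         /* pretend chunks over 1 MB fail */
}

int main(void)
{
        unsigned int pbl_base = 0x100000, pbl_top = 0x8fffff;   /* 8 MB range */
        unsigned int pbl_start = pbl_base;
        unsigned int pbl_chunk = pbl_top - pbl_start + 1;

        while (pbl_start < pbl_top) {
                if (pbl_top - pbl_start + 1 < pbl_chunk)
                        pbl_chunk = pbl_top - pbl_start + 1;
                if (add_chunk(pbl_start, pbl_chunk)) {
                        if (pbl_chunk <= 1024 << MIN_PBL_SHIFT)
                                break;                  /* give up */
                        pbl_chunk >>= 1;                /* halve and retry */
                } else {
                        printf("added chunk 0x%x/0x%x\n",
                               pbl_start, pbl_chunk);
                        pbl_start += pbl_chunk;
                }
        }
        return 0;
}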
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d44a6df9ad8..c325c44807e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -67,10 +67,10 @@ int peer2peer = 0;
 module_param(peer2peer, int, 0644);
 MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
 
-static int ep_timeout_secs = 10;
+static int ep_timeout_secs = 60;
 module_param(ep_timeout_secs, int, 0644);
 MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
-                                   "in seconds (default=10)");
+                                   "in seconds (default=60)");
 
 static int mpa_rev = 1;
 module_param(mpa_rev, int, 0644);
@@ -1650,8 +1650,8 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                 release = 1;
                 break;
         case ABORTING:
-                break;
         case DEAD:
+                break;
         default:
                 BUG_ON(1);
                 break;
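
Two independent fixes in this file: the default MPA connection-setup timeout is raised from 10 to 60 seconds, and close_con_rpl() no longer hits BUG_ON(1) when a close reply arrives for an endpoint that is already DEAD (previously the empty DEAD case fell through into the default arm). The effective behavior after the reorder, as a sketch:

        switch (state) {
        case ABORTING:
        case DEAD:
                break;          /* silently ignore the late close reply */
        default:
                BUG_ON(1);      /* any other state is a genuine bug */
                break;
        }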
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index 58c3d61bcd1..ec49a5cbdeb 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -35,17 +35,26 @@
 #include <rdma/ib_verbs.h>
 
 #include "cxio_hal.h"
+#include "cxio_resource.h"
 #include "iwch.h"
 #include "iwch_provider.h"
 
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
-                      struct iwch_mr *mhp,
-                      int shift,
-                      __be64 *page_list)
+static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
 {
-        u32 stag;
         u32 mmid;
 
+        mhp->attr.state = 1;
+        mhp->attr.stag = stag;
+        mmid = stag >> 8;
+        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+        insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+        PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+}
+
+int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
+                      struct iwch_mr *mhp, int shift)
+{
+        u32 stag;
 
         if (cxio_register_phys_mem(&rhp->rdev,
                                    &stag, mhp->attr.pdid,
@@ -53,28 +62,21 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                                    mhp->attr.zbva,
                                    mhp->attr.va_fbo,
                                    mhp->attr.len,
-                                   shift-12,
-                                   page_list,
-                                   &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
+                                   shift - 12,
+                                   mhp->attr.pbl_size, mhp->attr.pbl_addr))
                 return -ENOMEM;
-        mhp->attr.state = 1;
-        mhp->attr.stag = stag;
-        mmid = stag >> 8;
-        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-        insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-        PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+
+        iwch_finish_mem_reg(mhp, stag);
+
         return 0;
 }
 
 int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                         struct iwch_mr *mhp,
                         int shift,
-                        __be64 *page_list,
                         int npages)
 {
         u32 stag;
-        u32 mmid;
-
 
         /* We could support this... */
         if (npages > mhp->attr.pbl_size)
@@ -87,19 +89,40 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                                    mhp->attr.zbva,
                                    mhp->attr.va_fbo,
                                    mhp->attr.len,
-                                   shift-12,
-                                   page_list,
-                                   &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
+                                   shift - 12,
+                                   mhp->attr.pbl_size, mhp->attr.pbl_addr))
                 return -ENOMEM;
-        mhp->attr.state = 1;
-        mhp->attr.stag = stag;
-        mmid = stag >> 8;
-        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-        insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-        PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+
+        iwch_finish_mem_reg(mhp, stag);
+
+        return 0;
+}
+
+int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
+{
+        mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
+                                                    npages << 3);
+
+        if (!mhp->attr.pbl_addr)
+                return -ENOMEM;
+
+        mhp->attr.pbl_size = npages;
+
         return 0;
 }
 
+void iwch_free_pbl(struct iwch_mr *mhp)
+{
+        cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+                              mhp->attr.pbl_size << 3);
+}
+
+int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
+{
+        return cxio_write_pbl(&mhp->rhp->rdev, pages,
+                              mhp->attr.pbl_addr + (offset << 3), npages);
+}
+
 int build_phys_page_list(struct ib_phys_buf *buffer_list,
                          int num_phys_buf,
                          u64 *iova_start,
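
Taken together, the new helpers split a registration into three steps: reserve PBL space, write the page-list entries (possibly in several batches via the offset argument), then write the TPT entry. A condensed sketch of that sequence; example_register() is hypothetical and assumes mhp->attr (pdid, len, perms, and so on) is already populated, see iwch_register_phys_mem() in iwch_provider.c below for the real flow:

static int example_register(struct iwch_dev *rhp, struct iwch_pd *php,
                            struct iwch_mr *mhp, __be64 *pages,
                            int npages, int shift)
{
        int err;

        err = iwch_alloc_pbl(mhp, npages);              /* reserve PBL space */
        if (err)
                return err;

        err = iwch_write_pbl(mhp, pages, npages, 0);    /* write the entries */
        if (err)
                goto free_pbl;

        err = iwch_register_mem(rhp, php, mhp, shift);  /* write TPT entry */
        if (err)
                goto free_pbl;

        return 0;

free_pbl:
        iwch_free_pbl(mhp);
        return err;
}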
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index d07d3a377b5..8934178a23e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -442,6 +442,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
         mmid = mhp->attr.stag >> 8;
         cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                        mhp->attr.pbl_addr);
+        iwch_free_pbl(mhp);
         remove_handle(rhp, &rhp->mmidr, mmid);
         if (mhp->kva)
                 kfree((void *) (unsigned long) mhp->kva);
@@ -475,6 +476,8 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
         if (!mhp)
                 return ERR_PTR(-ENOMEM);
 
+        mhp->rhp = rhp;
+
         /* First check that we have enough alignment */
         if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
                 ret = -EINVAL;
@@ -492,7 +495,17 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
         if (ret)
                 goto err;
 
-        mhp->rhp = rhp;
+        ret = iwch_alloc_pbl(mhp, npages);
+        if (ret) {
+                kfree(page_list);
+                goto err_pbl;
+        }
+
+        ret = iwch_write_pbl(mhp, page_list, npages, 0);
+        kfree(page_list);
+        if (ret)
+                goto err_pbl;
+
         mhp->attr.pdid = php->pdid;
         mhp->attr.zbva = 0;
 
@@ -502,12 +515,15 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
 
         mhp->attr.len = (u32) total_size;
         mhp->attr.pbl_size = npages;
-        ret = iwch_register_mem(rhp, php, mhp, shift, page_list);
-        kfree(page_list);
-        if (ret) {
-                goto err;
-        }
+        ret = iwch_register_mem(rhp, php, mhp, shift);
+        if (ret)
+                goto err_pbl;
+
         return &mhp->ibmr;
+
+err_pbl:
+        iwch_free_pbl(mhp);
+
 err:
         kfree(mhp);
         return ERR_PTR(ret);
@@ -560,7 +576,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
                 return ret;
         }
 
-        ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
+        ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
         kfree(page_list);
         if (ret) {
                 return ret;
@@ -602,6 +618,8 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         if (!mhp)
                 return ERR_PTR(-ENOMEM);
 
+        mhp->rhp = rhp;
+
         mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
         if (IS_ERR(mhp->umem)) {
                 err = PTR_ERR(mhp->umem);
@@ -615,10 +633,14 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
                 n += chunk->nents;
 
-        pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
+        err = iwch_alloc_pbl(mhp, n);
+        if (err)
+                goto err;
+
+        pages = (__be64 *) __get_free_page(GFP_KERNEL);
         if (!pages) {
                 err = -ENOMEM;
-                goto err;
+                goto err_pbl;
         }
 
         i = n = 0;
@@ -630,25 +652,38 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                 pages[i++] = cpu_to_be64(sg_dma_address(
                                         &chunk->page_list[j]) +
                                         mhp->umem->page_size * k);
+                                if (i == PAGE_SIZE / sizeof *pages) {
+                                        err = iwch_write_pbl(mhp, pages, i, n);
+                                        if (err)
+                                                goto pbl_done;
+                                        n += i;
+                                        i = 0;
+                                }
                         }
                 }
 
-        mhp->rhp = rhp;
+        if (i)
+                err = iwch_write_pbl(mhp, pages, i, n);
+
+pbl_done:
+        free_page((unsigned long) pages);
+        if (err)
+                goto err_pbl;
+
         mhp->attr.pdid = php->pdid;
         mhp->attr.zbva = 0;
         mhp->attr.perms = iwch_ib_to_tpt_access(acc);
         mhp->attr.va_fbo = virt;
         mhp->attr.page_size = shift - 12;
         mhp->attr.len = (u32) length;
-        mhp->attr.pbl_size = i;
-        err = iwch_register_mem(rhp, php, mhp, shift, pages);
-        kfree(pages);
+
+        err = iwch_register_mem(rhp, php, mhp, shift);
         if (err)
-                goto err;
+                goto err_pbl;
 
         if (udata && !t3a_device(rhp)) {
                 uresp.pbl_addr = (mhp->attr.pbl_addr -
                                   rhp->rdev.rnic_info.pbl_base) >> 3;
                 PDBG("%s user resp pbl_addr 0x%x\n", __func__,
                      uresp.pbl_addr);
 
@@ -661,6 +696,9 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
         return &mhp->ibmr;
 
+err_pbl:
+        iwch_free_pbl(mhp);
+
 err:
         ib_umem_release(mhp->umem);
         kfree(mhp);
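
The iwch_reg_user_mr() change is what lifts the severe size limit on userspace registrations: instead of kmalloc()ing one buffer large enough for all n PBL entries (kmalloc tops out at a few contiguous pages), the loop stages entries in a single page and flushes it with iwch_write_pbl() each time it fills. The batch arithmetic, checked by a standalone program (assumes the common 4 KB PAGE_SIZE):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned long page_size = 4096;         /* typical PAGE_SIZE */
        unsigned long per_flush = page_size / sizeof(uint64_t);

        /* 512 entries per batch; each batch maps 2 MB of 4 KB pages,
         * while the driver's staging buffer stays one page no matter
         * how large the registration is. */
        printf("%lu PBL entries per iwch_write_pbl() call\n", per_flush);
        printf("%lu bytes mapped per batch\n", per_flush * page_size);
        return 0;
}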
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index db5100d27ca..836163fc542 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -340,14 +340,14 @@ int iwch_quiesce_qps(struct iwch_cq *chp);
340int iwch_resume_qps(struct iwch_cq *chp); 340int iwch_resume_qps(struct iwch_cq *chp);
341void stop_read_rep_timer(struct iwch_qp *qhp); 341void stop_read_rep_timer(struct iwch_qp *qhp);
342int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, 342int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
343 struct iwch_mr *mhp, 343 struct iwch_mr *mhp, int shift);
344 int shift,
345 __be64 *page_list);
346int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, 344int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
347 struct iwch_mr *mhp, 345 struct iwch_mr *mhp,
348 int shift, 346 int shift,
349 __be64 *page_list,
350 int npages); 347 int npages);
348int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
349void iwch_free_pbl(struct iwch_mr *mhp);
350int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
351int build_phys_page_list(struct ib_phys_buf *buffer_list, 351int build_phys_page_list(struct ib_phys_buf *buffer_list,
352 int num_phys_buf, 352 int num_phys_buf,
353 u64 *iova_start, 353 u64 *iova_start,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 9b4be889c58..79dbe5beae5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -655,6 +655,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
         struct iwch_cq *rchp, *schp;
         int count;
+        int flushed;
 
         rchp = get_chp(qhp->rhp, qhp->attr.rcq);
         schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -669,20 +670,22 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
         spin_lock(&qhp->lock);
         cxio_flush_hw_cq(&rchp->cq);
         cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
-        cxio_flush_rq(&qhp->wq, &rchp->cq, count);
+        flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
         spin_unlock(&qhp->lock);
         spin_unlock_irqrestore(&rchp->lock, *flag);
-        (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+        if (flushed)
+                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
 
         /* locking heirarchy: cq lock first, then qp lock. */
         spin_lock_irqsave(&schp->lock, *flag);
         spin_lock(&qhp->lock);
         cxio_flush_hw_cq(&schp->cq);
         cxio_count_scqes(&schp->cq, &qhp->wq, &count);
-        cxio_flush_sq(&qhp->wq, &schp->cq, count);
+        flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
         spin_unlock(&qhp->lock);
         spin_unlock_irqrestore(&schp->lock, *flag);
-        (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+        if (flushed)
+                (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
 
         /* deref */
         if (atomic_dec_and_test(&qhp->refcnt))
@@ -880,7 +883,6 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                         ep = qhp->ep;
                         get_ep(&ep->com);
                 }
-                flush_qp(qhp, &flag);
                 break;
         case IWCH_QP_STATE_TERMINATE:
                 qhp->attr.state = IWCH_QP_STATE_TERMINATE;
@@ -911,6 +913,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                 }
                 switch (attrs->next_state) {
                 case IWCH_QP_STATE_IDLE:
+                        flush_qp(qhp, &flag);
                         qhp->attr.state = IWCH_QP_STATE_IDLE;
                         qhp->attr.llp_stream_handle = NULL;
                         put_ep(&qhp->ep->com);