author      Dennis Dalessandro <dennis.dalessandro@intel.com>   2016-02-14 15:11:20 -0500
committer   Doug Ledford <dledford@redhat.com>                  2016-03-10 20:38:10 -0500
commit      79a225be38932b17707009767e85d6edf450e7cc (patch)
tree        5359d4de3bcc764ec6014479d6b6954a245c4e73
parent      0765b01b8e2da50ad56f6544f935f5eaef1389f2 (diff)
IB/rdmavt: Remove unnecessary exported functions
Remove exported functions that are no longer required now that their
functionality has moved into rdmavt. This also requires re-ordering some
of the functions, since their prototypes no longer appear in a header
file; re-ordering is cleaner than adding forward declarations.
Reviewed-by: Jubin John <jubin.john@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--  drivers/infiniband/sw/rdmavt/mmap.c |   4
-rw-r--r--  drivers/infiniband/sw/rdmavt/mmap.h |   8
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c   | 252
-rw-r--r--  drivers/infiniband/sw/rdmavt/srq.c  |   1
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.h   |   1
-rw-r--r--  include/rdma/rdma_vt.h              |  13
-rw-r--r--  include/rdma/rdmavt_qp.h            |   4
7 files changed, 128 insertions, 155 deletions
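The pattern repeated throughout the patch is the same for every helper: a
function that used to be exported to the hfi1/qib drivers loses its header
prototype and its EXPORT_SYMBOL() line and becomes static, because its only
remaining callers are inside rdmavt itself. The sketch below is illustrative
only; rvt_example_helper is a made-up name standing in for helpers such as
rvt_reset_qp() or rvt_remove_qp(), and the code is not taken from the patch.

/*
 * Illustrative sketch only: rvt_example_helper is hypothetical.
 * It shows the "after" state; the comments note what the patch removes.
 */
#include <linux/types.h>
#include <linux/printk.h>

/*
 * Before: a prototype in a public header plus
 * EXPORT_SYMBOL(rvt_example_helper) so other modules could call it.
 * After: static linkage, no header prototype, no EXPORT_SYMBOL() line --
 * the symbol is visible only inside the rdmavt module.
 */
static void rvt_example_helper(u32 qpn)
{
        pr_info("rdmavt: cleaning up qpn %u\n", qpn);
}

static void rvt_example_caller(void)
{
        rvt_example_helper(42);         /* only callers are in this file now */
}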
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index 273974fb7d1f..e202b8142759 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -80,7 +80,6 @@ void rvt_release_mmap_info(struct kref *ref)
         vfree(ip->obj);
         kfree(ip);
 }
-EXPORT_SYMBOL(rvt_release_mmap_info);
 
 static void rvt_vma_open(struct vm_area_struct *vma)
 {
@@ -146,7 +145,6 @@ int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 done:
         return ret;
 }
-EXPORT_SYMBOL(rvt_mmap);
 
 /**
  * rvt_create_mmap_info - allocate information for hfi1_mmap
@@ -185,7 +183,6 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
 
         return ip;
 }
-EXPORT_SYMBOL(rvt_create_mmap_info);
 
 /**
  * rvt_update_mmap_info - update a mem map
@@ -209,4 +206,3 @@ void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
         ip->size = size;
         ip->obj = obj;
 }
-EXPORT_SYMBOL(rvt_update_mmap_info);
diff --git a/drivers/infiniband/sw/rdmavt/mmap.h b/drivers/infiniband/sw/rdmavt/mmap.h
index e8067471c722..fab0e7b1daf9 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.h
+++ b/drivers/infiniband/sw/rdmavt/mmap.h
@@ -51,5 +51,13 @@
 #include <rdma/rdma_vt.h>
 
 void rvt_mmap_init(struct rvt_dev_info *rdi);
+void rvt_release_mmap_info(struct kref *ref);
+int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
+                                           u32 size,
+                                           struct ib_ucontext *context,
+                                           void *obj);
+void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
+                          u32 size, void *obj);
 
 #endif /* DEF_RDMAVTMMAP_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 441e17a0467f..dbf124db1fd1 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -390,12 +390,116 @@ static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
 }
 
 /**
+ * rvt_clear_mr_refs - Drop help mr refs
+ * @qp: rvt qp data structure
+ * @clr_sends: If shoudl clear send side or not
+ */
+static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
+{
+        unsigned n;
+
+        if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
+                rvt_put_ss(&qp->s_rdma_read_sge);
+
+        rvt_put_ss(&qp->r_sge);
+
+        if (clr_sends) {
+                while (qp->s_last != qp->s_head) {
+                        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+                        unsigned i;
+
+                        for (i = 0; i < wqe->wr.num_sge; i++) {
+                                struct rvt_sge *sge = &wqe->sg_list[i];
+
+                                rvt_put_mr(sge->mr);
+                        }
+                        if (qp->ibqp.qp_type == IB_QPT_UD ||
+                            qp->ibqp.qp_type == IB_QPT_SMI ||
+                            qp->ibqp.qp_type == IB_QPT_GSI)
+                                atomic_dec(&ibah_to_rvtah(
+                                                wqe->ud_wr.ah)->refcount);
+                        if (++qp->s_last >= qp->s_size)
+                                qp->s_last = 0;
+                        smp_wmb(); /* see qp_set_savail */
+                }
+                if (qp->s_rdma_mr) {
+                        rvt_put_mr(qp->s_rdma_mr);
+                        qp->s_rdma_mr = NULL;
+                }
+        }
+
+        if (qp->ibqp.qp_type != IB_QPT_RC)
+                return;
+
+        for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+                struct rvt_ack_entry *e = &qp->s_ack_queue[n];
+
+                if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
+                    e->rdma_sge.mr) {
+                        rvt_put_mr(e->rdma_sge.mr);
+                        e->rdma_sge.mr = NULL;
+                }
+        }
+}
+
+/**
+ * rvt_remove_qp - remove qp form table
+ * @rdi: rvt dev struct
+ * @qp: qp to remove
+ *
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive routine.
+ */
+static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
+        u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
+        unsigned long flags;
+        int removed = 1;
+
+        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
+
+        if (rcu_dereference_protected(rvp->qp[0],
+                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
+                RCU_INIT_POINTER(rvp->qp[0], NULL);
+        } else if (rcu_dereference_protected(rvp->qp[1],
+                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
+                RCU_INIT_POINTER(rvp->qp[1], NULL);
+        } else {
+                struct rvt_qp *q;
+                struct rvt_qp __rcu **qpp;
+
+                removed = 0;
+                qpp = &rdi->qp_dev->qp_table[n];
+                for (; (q = rcu_dereference_protected(*qpp,
+                        lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
+                        qpp = &q->next) {
+                        if (q == qp) {
+                                RCU_INIT_POINTER(*qpp,
+                                     rcu_dereference_protected(qp->next,
+                                     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
+                                removed = 1;
+                                trace_rvt_qpremove(qp, n);
+                                break;
+                        }
+                }
+        }
+
+        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
+        if (removed) {
+                synchronize_rcu();
+                if (atomic_dec_and_test(&qp->refcount))
+                        wake_up(&qp->wait);
+        }
+}
+
+/**
  * reset_qp - initialize the QP state to the reset state
  * @qp: the QP to reset
  * @type: the QP type
  * r and s lock are required to be held by the caller
  */
-void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                   enum ib_qp_type type)
 {
         if (qp->state != IB_QPS_RESET) {
@@ -475,7 +579,6 @@ void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
         }
         qp->r_sge.num_sge = 0;
 }
-EXPORT_SYMBOL(rvt_reset_qp);
 
 /**
  * rvt_create_qp - create a queue pair for a device
@@ -762,60 +865,6 @@ bail_swq:
 }
 
 /**
- * rvt_clear_mr_refs - Drop help mr refs
- * @qp: rvt qp data structure
- * @clr_sends: If shoudl clear send side or not
- */
-void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
-{
-        unsigned n;
-
-        if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
-                rvt_put_ss(&qp->s_rdma_read_sge);
-
-        rvt_put_ss(&qp->r_sge);
-
-        if (clr_sends) {
-                while (qp->s_last != qp->s_head) {
-                        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-                        unsigned i;
-
-                        for (i = 0; i < wqe->wr.num_sge; i++) {
-                                struct rvt_sge *sge = &wqe->sg_list[i];
-
-                                rvt_put_mr(sge->mr);
-                        }
-                        if (qp->ibqp.qp_type == IB_QPT_UD ||
-                            qp->ibqp.qp_type == IB_QPT_SMI ||
-                            qp->ibqp.qp_type == IB_QPT_GSI)
-                                atomic_dec(&ibah_to_rvtah(
-                                                wqe->ud_wr.ah)->refcount);
-                        if (++qp->s_last >= qp->s_size)
-                                qp->s_last = 0;
-                        smp_wmb(); /* see qp_set_savail */
-                }
-                if (qp->s_rdma_mr) {
-                        rvt_put_mr(qp->s_rdma_mr);
-                        qp->s_rdma_mr = NULL;
-                }
-        }
-
-        if (qp->ibqp.qp_type != IB_QPT_RC)
-                return;
-
-        for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
-                struct rvt_ack_entry *e = &qp->s_ack_queue[n];
-
-                if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
-                    e->rdma_sge.mr) {
-                        rvt_put_mr(e->rdma_sge.mr);
-                        e->rdma_sge.mr = NULL;
-                }
-        }
-}
-EXPORT_SYMBOL(rvt_clear_mr_refs);
-
-/**
  * rvt_error_qp - put a QP into the error state
  * @qp: the QP to put into the error state
  * @err: the receive completion error to signal if a RWQE is active
@@ -923,58 +972,6 @@ static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
 }
 
 /**
- * rvt_remove_qp - remove qp form table
- * @rdi: rvt dev struct
- * @qp: qp to remove
- *
- * Remove the QP from the table so it can't be found asynchronously by
- * the receive routine.
- */
-void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
-{
-        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
-        u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
-        unsigned long flags;
-        int removed = 1;
-
-        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
-
-        if (rcu_dereference_protected(rvp->qp[0],
-                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
-                RCU_INIT_POINTER(rvp->qp[0], NULL);
-        } else if (rcu_dereference_protected(rvp->qp[1],
-                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
-                RCU_INIT_POINTER(rvp->qp[1], NULL);
-        } else {
-                struct rvt_qp *q;
-                struct rvt_qp __rcu **qpp;
-
-                removed = 0;
-                qpp = &rdi->qp_dev->qp_table[n];
-                for (; (q = rcu_dereference_protected(*qpp,
-                        lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
-                        qpp = &q->next) {
-                        if (q == qp) {
-                                RCU_INIT_POINTER(*qpp,
-                                     rcu_dereference_protected(qp->next,
-                                     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
-                                removed = 1;
-                                trace_rvt_qpremove(qp, n);
-                                break;
-                        }
-                }
-        }
-
-        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
-        if (removed) {
-                synchronize_rcu();
-                if (atomic_dec_and_test(&qp->refcount))
-                        wake_up(&qp->wait);
-        }
-}
-EXPORT_SYMBOL(rvt_remove_qp);
-
-/**
  * qib_modify_qp - modify the attributes of a queue pair
  * @ibqp: the queue pair who's attributes we're modifying
  * @attr: the new attributes
@@ -1234,6 +1231,19 @@ inval:
         return -EINVAL;
 }
 
+/** rvt_free_qpn - Free a qpn from the bit map
+ * @qpt: QP table
+ * @qpn: queue pair number to free
+ */
+static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
+{
+        struct rvt_qpn_map *map;
+
+        map = qpt->map + qpn / RVT_BITS_PER_PAGE;
+        if (map->page)
+                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
+}
+
 /**
  * rvt_destroy_qp - destroy a queue pair
  * @ibqp: the queue pair to destroy
@@ -1664,29 +1674,3 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
         }
         return 0;
 }
-
-/** rvt_free_qpn - Free a qpn from the bit map
- * @qpt: QP table
- * @qpn: queue pair number to free
- */
-void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
-{
-        struct rvt_qpn_map *map;
-
-        map = qpt->map + qpn / RVT_BITS_PER_PAGE;
-        if (map->page)
-                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
-}
-EXPORT_SYMBOL(rvt_free_qpn);
-
-/**
- * rvt_dec_qp_cnt - decrement qp count
- * rdi: rvt dev struct
- */
-void rvt_dec_qp_cnt(struct rvt_dev_info *rdi)
-{
-        spin_lock(&rdi->n_qps_lock);
-        rdi->n_qps_allocated--;
-        spin_unlock(&rdi->n_qps_lock);
-}
-EXPORT_SYMBOL(rvt_dec_qp_cnt);
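The qp.c hunks above also show why the re-ordering was needed: once
rvt_clear_mr_refs(), rvt_remove_qp() and rvt_free_qpn() become static with no
header prototype, they must be defined (or forward-declared) before their
first use in the file. A minimal sketch of that rule, using hypothetical
names rather than code from the patch:

/* Hypothetical names; a minimal sketch of why definition order matters. */

/* Option 1: keep the definition late and add a forward declaration. */
static void helper(void);               /* forward declaration */

static void caller_one(void)
{
        helper();                       /* OK: declaration already seen */
}

static void helper(void)
{
        /* real work would go here */
}

/*
 * Option 2 (what this patch does): move the definition above its first
 * caller, so no forward declaration is needed at all.
 */
static void earlier_helper(void)
{
        /* real work would go here */
}

static void caller_two(void)
{
        earlier_helper();
}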
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 98c492797c53..f7c48e9023de 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -50,6 +50,7 @@
 #include <linux/vmalloc.h>
 
 #include "srq.h"
+#include "vt.h"
 
 /**
  * rvt_driver_srq_init - init srq resources on a per driver basis
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
index e26f9e94d1ea..6b01eaa4461b 100644
--- a/drivers/infiniband/sw/rdmavt/vt.h
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -60,6 +60,7 @@
 #include "mmap.h"
 #include "cq.h"
 #include "mad.h"
+#include "mmap.h"
 
 #define rvt_pr_info(rdi, fmt, ...) \
         __rvt_pr_info(rdi->driver_f.get_pci_dev(rdi), \
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 4c50bbb75d77..a8696551abb1 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -476,19 +476,6 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
                 u32 len, u64 vaddr, u32 rkey, int acc);
 int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
                 struct rvt_sge *isge, struct ib_sge *sge, int acc);
-int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-void rvt_release_mmap_info(struct kref *ref);
-struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
-                                           u32 size,
-                                           struct ib_ucontext *context,
-                                           void *obj);
-void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
-                          u32 size, void *obj);
-int rvt_reg_mr(struct rvt_qp *qp, struct ib_reg_wr *wr);
 struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid);
 
-/* Temporary export */
-void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
-                  enum ib_qp_type type);
-
 #endif /* DEF_RDMA_VT_H */
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 1066b5d1b4d2..933f14f92da6 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -438,10 +438,6 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
 extern const int ib_rvt_state_ops[];
 
 struct rvt_dev_info;
-void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp);
-void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends);
 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
-void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn);
-void rvt_dec_qp_cnt(struct rvt_dev_info *rdi);
 
 #endif /* DEF_RDMAVT_INCQP_H */