author     Steven Whitehouse <swhiteho@redhat.com>	2006-04-21 12:52:36 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>	2006-04-21 12:52:36 -0400
commit     a748422ee45725e04e1d3792fa19dfa90ddfd116 (patch)
tree       978e12895468baaa9f7ab2747b9f7d50beaf1717 /drivers/infiniband/hw
parent     c63e31c2cc1ec67372920b5e1aff8204d04dd172 (diff)
parent     f4ffaa452e71495a06376f12f772342bc57051fc (diff)

Merge branch 'master'

Diffstat (limited to 'drivers/infiniband/hw'): 22 files changed, 393 insertions, 180 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index cd533cf951c2..7d3fb6996b41 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -365,15 +365,3 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
 bail:
 	return ret;
 }
-
-void ipath_diag_bringup_link(struct ipath_devdata *dd)
-{
-	if (diag_set_link || (dd->ipath_flags & IPATH_LINKACTIVE))
-		return;
-
-	diag_set_link = 1;
-	ipath_cdbg(VERBOSE, "Trying to set to set link active for "
-		   "diag pkt\n");
-	ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM);
-	ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
-}
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 58a94efb0070..e7617c3982ea 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1729,7 +1729,7 @@ void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
 	}
 }
 
-int __init infinipath_init(void)
+static int __init infinipath_init(void)
 {
 	int ret;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 60f5f4108069..0bcb428041f3 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -172,8 +172,8 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 			   "was %s\n", dd->ipath_unit,
 			   ib_linkstate(lstate),
 			   ib_linkstate((unsigned)
 					dd->ipath_lastibcstat
 					& IPATH_IBSTATE_MASK));
 	}
 	else {
 		lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 159d0aed31a5..0ce5f19c9d62 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -528,7 +528,6 @@ extern spinlock_t ipath_devs_lock;
 extern struct ipath_devdata *ipath_lookup(int unit);
 
 extern u16 ipath_layer_rcv_opcode;
-extern int ipath_verbs_registered;
 extern int __ipath_layer_intr(struct ipath_devdata *, u32);
 extern int ipath_layer_intr(struct ipath_devdata *, u32);
 extern int __ipath_layer_rcv(struct ipath_devdata *, void *,
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index 2cabf6340572..69ed1100701a 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -52,7 +52,7 @@ static int (*layer_rcv)(void *, void *, struct sk_buff *);
 static int (*layer_rcv_lid)(void *, void *);
 static int (*verbs_piobufavail)(void *);
 static void (*verbs_rcv)(void *, void *, void *, u32);
-int ipath_verbs_registered;
+static int ipath_verbs_registered;
 
 static void *(*layer_add_one)(int, struct ipath_devdata *);
 static void (*layer_remove_one)(void *);
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c
index e693a7a82667..e1dc4f757062 100644
--- a/drivers/infiniband/hw/ipath/ipath_pe800.c
+++ b/drivers/infiniband/hw/ipath/ipath_pe800.c
@@ -305,8 +305,8 @@ static const struct ipath_cregs ipath_pe_cregs = {
  * we'll print them and continue.  We reuse the same message buffer as
  * ipath_handle_errors() to avoid excessive stack usage.
  */
-void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
-			      size_t msgl)
+static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
+				     size_t msgl)
 {
 	ipath_err_t hwerrs;
 	u32 bits, ctrl;
@@ -552,7 +552,7 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
  * freeze mode), and enable hardware errors as errors (along with
  * everything else) in errormask
  */
-void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
+static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
 {
 	ipath_err_t val;
 	u64 extsval;
@@ -577,7 +577,7 @@ void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
  * ipath_pe_bringup_serdes - bring up the serdes
  * @dd: the infinipath device
  */
-int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
+static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
 {
 	u64 val, tmp, config1;
 	int ret = 0, change = 0;
@@ -694,7 +694,7 @@ int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
  * @dd: the infinipath device
  * Called when driver is being unloaded
  */
-void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
+static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
 {
 	u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
 
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 6058d70d7577..18890716db1e 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -188,8 +188,8 @@ static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
  * Allocate the next available QPN and put the QP into the hash table.
  * The hash table holds a reference to the QP.
  */
-int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
-		    enum ib_qp_type type)
+static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
+			   enum ib_qp_type type)
 {
 	unsigned long flags;
 	u32 qpn;
@@ -232,7 +232,7 @@ bail:
  * Remove the QP from the table so it can't be found asynchronously by
  * the receive interrupt routine.
  */
-void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
+static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
 {
 	struct ipath_qp *q, **qpp;
 	unsigned long flags;
@@ -358,6 +358,65 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 }
 
 /**
+ * ipath_error_qp - put a QP into an error state
+ * @qp: the QP to put into an error state
+ *
+ * Flushes both send and receive work queues.
+ * QP r_rq.lock and s_lock should be held.
+ */
+
+static void ipath_error_qp(struct ipath_qp *qp)
+{
+	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ib_wc wc;
+
+	_VERBS_INFO("QP%d/%d in error state\n",
+		    qp->ibqp.qp_num, qp->remote_qpn);
+
+	spin_lock(&dev->pending_lock);
+	/* XXX What if its already removed by the timeout code? */
+	if (qp->timerwait.next != LIST_POISON1)
+		list_del(&qp->timerwait);
+	if (qp->piowait.next != LIST_POISON1)
+		list_del(&qp->piowait);
+	spin_unlock(&dev->pending_lock);
+
+	wc.status = IB_WC_WR_FLUSH_ERR;
+	wc.vendor_err = 0;
+	wc.byte_len = 0;
+	wc.imm_data = 0;
+	wc.qp_num = qp->ibqp.qp_num;
+	wc.src_qp = 0;
+	wc.wc_flags = 0;
+	wc.pkey_index = 0;
+	wc.slid = 0;
+	wc.sl = 0;
+	wc.dlid_path_bits = 0;
+	wc.port_num = 0;
+
+	while (qp->s_last != qp->s_head) {
+		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+
+		wc.wr_id = wqe->wr.wr_id;
+		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+		if (++qp->s_last >= qp->s_size)
+			qp->s_last = 0;
+		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+	}
+	qp->s_cur = qp->s_tail = qp->s_head;
+	qp->s_hdrwords = 0;
+	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+
+	wc.opcode = IB_WC_RECV;
+	while (qp->r_rq.tail != qp->r_rq.head) {
+		wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
+		if (++qp->r_rq.tail >= qp->r_rq.size)
+			qp->r_rq.tail = 0;
+		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+	}
+}
+
+/**
  * ipath_modify_qp - modify the attributes of a queue pair
  * @ibqp: the queue pair who's attributes we're modifying
  * @attr: the new attributes
@@ -821,65 +880,6 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
 }
 
 /**
- * ipath_error_qp - put a QP into an error state
- * @qp: the QP to put into an error state
- *
- * Flushes both send and receive work queues.
- * QP r_rq.lock and s_lock should be held.
- */
-
-void ipath_error_qp(struct ipath_qp *qp)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ib_wc wc;
-
-	_VERBS_INFO("QP%d/%d in error state\n",
-		    qp->ibqp.qp_num, qp->remote_qpn);
-
-	spin_lock(&dev->pending_lock);
-	/* XXX What if its already removed by the timeout code? */
-	if (qp->timerwait.next != LIST_POISON1)
-		list_del(&qp->timerwait);
-	if (qp->piowait.next != LIST_POISON1)
-		list_del(&qp->piowait);
-	spin_unlock(&dev->pending_lock);
-
-	wc.status = IB_WC_WR_FLUSH_ERR;
-	wc.vendor_err = 0;
-	wc.byte_len = 0;
-	wc.imm_data = 0;
-	wc.qp_num = qp->ibqp.qp_num;
-	wc.src_qp = 0;
-	wc.wc_flags = 0;
-	wc.pkey_index = 0;
-	wc.slid = 0;
-	wc.sl = 0;
-	wc.dlid_path_bits = 0;
-	wc.port_num = 0;
-
-	while (qp->s_last != qp->s_head) {
-		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
-		wc.wr_id = wqe->wr.wr_id;
-		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
-	}
-	qp->s_cur = qp->s_tail = qp->s_head;
-	qp->s_hdrwords = 0;
-	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-
-	wc.opcode = IB_WC_RECV;
-	while (qp->r_rq.tail != qp->r_rq.head) {
-		wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
-		if (++qp->r_rq.tail >= qp->r_rq.size)
-			qp->r_rq.tail = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-	}
-}
-
-/**
  * ipath_get_credit - flush the send work queue of a QP
  * @qp: the qp who's send work queue to flush
  * @aeth: the Acknowledge Extended Transport Header
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 5ff3de6128b2..01cfb30ee160 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -46,8 +46,8 @@
  * This is called from ipath_post_ud_send() to forward a WQE addressed
  * to the same HCA.
  */
-void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
-		       u32 length, struct ib_send_wr *wr, struct ib_wc *wc)
+static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
+			      u32 length, struct ib_send_wr *wr, struct ib_wc *wc)
 {
 	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
 	struct ipath_qp *qp;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 9f27fd35cdbb..8d2558a01f35 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -41,7 +41,7 @@
 /* Not static, because we don't want the compiler removing it */
 const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
 
-unsigned int ib_ipath_qp_table_size = 251;
+static unsigned int ib_ipath_qp_table_size = 251;
 module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
 MODULE_PARM_DESC(qp_table_size, "QP table size");
 
@@ -87,7 +87,7 @@ const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
 /*
  * System image GUID.
  */
-__be64 sys_image_guid;
+static __be64 sys_image_guid;
 
 /**
  * ipath_copy_sge - copy data to SGE memory
@@ -1110,7 +1110,7 @@ static void ipath_unregister_ib_device(void *arg)
 	ib_dealloc_device(ibdev);
 }
 
-int __init ipath_verbs_init(void)
+static int __init ipath_verbs_init(void)
 {
 	return ipath_verbs_register(ipath_register_ib_device,
 				    ipath_unregister_ib_device,
@@ -1118,33 +1118,33 @@ int __init ipath_verbs_init(void)
 				    ipath_ib_timer);
 }
 
-void __exit ipath_verbs_cleanup(void)
+static void __exit ipath_verbs_cleanup(void)
 {
 	ipath_verbs_unregister();
 }
 
 static ssize_t show_rev(struct class_device *cdev, char *buf)
 {
 	struct ipath_ibdev *dev =
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 	int vendor, boardrev, majrev, minrev;
 
 	ipath_layer_query_device(dev->dd, &vendor, &boardrev,
 				 &majrev, &minrev);
 	return sprintf(buf, "%d.%d\n", majrev, minrev);
 }
 
 static ssize_t show_hca(struct class_device *cdev, char *buf)
 {
 	struct ipath_ibdev *dev =
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 	int ret;
 
 	ret = ipath_layer_get_boardname(dev->dd, buf, 128);
 	if (ret < 0)
 		goto bail;
 	strcat(buf, "\n");
 	ret = strlen(buf);
 
 bail:
 	return ret;
@@ -1152,40 +1152,40 @@ bail:
 
 static ssize_t show_stats(struct class_device *cdev, char *buf)
 {
 	struct ipath_ibdev *dev =
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 	int i;
 	int len;
 
 	len = sprintf(buf,
 		      "RC resends  %d\n"
 		      "RC QACKs    %d\n"
 		      "RC ACKs     %d\n"
 		      "RC SEQ NAKs %d\n"
 		      "RC RDMA seq %d\n"
 		      "RC RNR NAKs %d\n"
 		      "RC OTH NAKs %d\n"
 		      "RC timeouts %d\n"
 		      "RC RDMA dup %d\n"
 		      "piobuf wait %d\n"
 		      "no piobuf   %d\n"
 		      "PKT drops   %d\n"
 		      "WQE errs    %d\n",
 		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
 		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
 		      dev->n_other_naks, dev->n_timeouts,
 		      dev->n_rdma_dup_busy, dev->n_piowait,
 		      dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
 	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
 		const struct ipath_opcode_stats *si = &dev->opstats[i];
 
 		if (!si->n_packets && !si->n_bytes)
 			continue;
 		len += sprintf(buf + len, "%02x %llu/%llu\n", i,
 			       (unsigned long long) si->n_packets,
 			       (unsigned long long) si->n_bytes);
 	}
 	return len;
 }
 
 static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -1194,25 +1194,25 @@ static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
 static CLASS_DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
 
 static struct class_device_attribute *ipath_class_attributes[] = {
 	&class_device_attr_hw_rev,
 	&class_device_attr_hca_type,
 	&class_device_attr_board_id,
 	&class_device_attr_stats
 };
 
 static int ipath_verbs_register_sysfs(struct ib_device *dev)
 {
 	int i;
 	int ret;
 
 	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
 		if (class_device_create_file(&dev->class_dev,
 					     ipath_class_attributes[i])) {
 			ret = 1;
 			goto bail;
 		}
 
 	ret = 0;
 
 bail:
 	return ret;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index b824632b2a8c..fcafbc7c9e71 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -577,8 +577,6 @@ int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
 
 void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
 
-void ipath_error_qp(struct ipath_qp *qp);
-
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
 void ipath_do_rc_send(unsigned long data);
@@ -607,9 +605,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 
 void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc);
 
-void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
-		       u32 length, struct ib_send_wr *wr, struct ib_wc *wc);
-
 int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
 
 void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
diff --git a/drivers/infiniband/hw/mthca/Kconfig b/drivers/infiniband/hw/mthca/Kconfig
index e88be85b3d5c..9aa5a4468a75 100644
--- a/drivers/infiniband/hw/mthca/Kconfig
+++ b/drivers/infiniband/hw/mthca/Kconfig
@@ -7,10 +7,11 @@ config INFINIBAND_MTHCA
 	  ("Tavor") and the MT25208 PCI Express HCA ("Arbel").
 
 config INFINIBAND_MTHCA_DEBUG
-	bool "Verbose debugging output"
+	bool "Verbose debugging output" if EMBEDDED
 	depends on INFINIBAND_MTHCA
-	default n
+	default y
 	---help---
-	  This option causes the mthca driver produce a bunch of debug
-	  messages.  Select this is you are developing the driver or
-	  trying to diagnose a problem.
+	  This option causes debugging code to be compiled into the
+	  mthca driver.  The output can be turned on via the
+	  debug_level module parameter (which can also be set after
+	  the driver is loaded through sysfs).
diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile
index 47ec5a7cba0b..e388d95d0cf1 100644
--- a/drivers/infiniband/hw/mthca/Makefile
+++ b/drivers/infiniband/hw/mthca/Makefile
@@ -1,7 +1,3 @@
-ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
-EXTRA_CFLAGS += -DDEBUG
-endif
-
 obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
 
 ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index bc5bdcbe51b5..b12aa03be251 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -42,6 +42,20 @@
 
 #include "mthca_dev.h"
 
+enum {
+	MTHCA_RATE_TAVOR_FULL   = 0,
+	MTHCA_RATE_TAVOR_1X     = 1,
+	MTHCA_RATE_TAVOR_4X     = 2,
+	MTHCA_RATE_TAVOR_1X_DDR = 3
+};
+
+enum {
+	MTHCA_RATE_MEMFREE_FULL    = 0,
+	MTHCA_RATE_MEMFREE_QUARTER = 1,
+	MTHCA_RATE_MEMFREE_EIGHTH  = 2,
+	MTHCA_RATE_MEMFREE_HALF    = 3
+};
+
 struct mthca_av {
 	__be32 port_pd;
 	u8     reserved1;
@@ -55,6 +69,90 @@ struct mthca_av {
 	__be32 dgid[4];
 };
 
+static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate)
+{
+	switch (mthca_rate) {
+	case MTHCA_RATE_MEMFREE_EIGHTH:
+		return mult_to_ib_rate(port_rate >> 3);
+	case MTHCA_RATE_MEMFREE_QUARTER:
+		return mult_to_ib_rate(port_rate >> 2);
+	case MTHCA_RATE_MEMFREE_HALF:
+		return mult_to_ib_rate(port_rate >> 1);
+	case MTHCA_RATE_MEMFREE_FULL:
+	default:
+		return mult_to_ib_rate(port_rate);
+	}
+}
+
+static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
+{
+	switch (mthca_rate) {
+	case MTHCA_RATE_TAVOR_1X:     return IB_RATE_2_5_GBPS;
+	case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS;
+	case MTHCA_RATE_TAVOR_4X:     return IB_RATE_10_GBPS;
+	default:		      return port_rate;
+	}
+}
+
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
+{
+	if (mthca_is_memfree(dev)) {
+		/* Handle old Arbel FW */
+		if (dev->limits.stat_rate_support == 0x3 && mthca_rate)
+			return IB_RATE_2_5_GBPS;
+
+		return memfree_rate_to_ib(mthca_rate, dev->rate[port - 1]);
+	} else
+		return tavor_rate_to_ib(mthca_rate, dev->rate[port - 1]);
+}
+
+static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
+{
+	if (cur_rate <= req_rate)
+		return 0;
+
+	/*
+	 * Inter-packet delay (IPD) to get from rate X down to a rate
+	 * no more than Y is (X - 1) / Y.
+	 */
+	switch ((cur_rate - 1) / req_rate) {
+	case 0:	 return MTHCA_RATE_MEMFREE_FULL;
+	case 1:	 return MTHCA_RATE_MEMFREE_HALF;
+	case 2:	 /* fall through */
+	case 3:	 return MTHCA_RATE_MEMFREE_QUARTER;
+	default: return MTHCA_RATE_MEMFREE_EIGHTH;
+	}
+}
+
+static u8 ib_rate_to_tavor(u8 static_rate)
+{
+	switch (static_rate) {
+	case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X;
+	case IB_RATE_5_GBPS:   return MTHCA_RATE_TAVOR_1X_DDR;
+	case IB_RATE_10_GBPS:  return MTHCA_RATE_TAVOR_4X;
+	default:	       return MTHCA_RATE_TAVOR_FULL;
+	}
+}
+
+u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
+{
+	u8 rate;
+
+	if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1])
+		return 0;
+
+	if (mthca_is_memfree(dev))
+		rate = ib_rate_to_memfree(ib_rate_to_mult(static_rate),
+					  dev->rate[port - 1]);
+	else
+		rate = ib_rate_to_tavor(static_rate);
+
+	if (!(dev->limits.stat_rate_support & (1 << rate)))
+		rate = 1;
+
+	return rate;
+}
+
 int mthca_create_ah(struct mthca_dev *dev,
 		    struct mthca_pd *pd,
 		    struct ib_ah_attr *ah_attr,
@@ -107,7 +205,7 @@ on_hca_fail:
 	av->g_slid  = ah_attr->src_path_bits;
 	av->dlid    = cpu_to_be16(ah_attr->dlid);
 	av->msg_sr  = (3 << 4) | /* 2K message */
-		ah_attr->static_rate;
+		mthca_get_rate(dev, ah_attr->static_rate, ah_attr->port_num);
 	av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		av->g_slid |= 0x80;
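
A note on the rate mapping added above: the inter-packet delay (IPD) comment in ib_rate_to_memfree() is easiest to check with concrete numbers. To throttle a link running at rate multiple X down to no more than Y, the IPD is (X - 1) / Y, and the switch picks the coarsest hardware code that does not exceed the request. A minimal standalone sketch of the same mapping (plain userspace C, not kernel code; the MTHCA_RATE_MEMFREE_* values are copied from the hunk above):

#include <stdio.h>

/* Values copied from the mthca_av.c hunk above. */
enum {
	MTHCA_RATE_MEMFREE_FULL    = 0,
	MTHCA_RATE_MEMFREE_QUARTER = 1,
	MTHCA_RATE_MEMFREE_EIGHTH  = 2,
	MTHCA_RATE_MEMFREE_HALF    = 3
};

/* Same logic as ib_rate_to_memfree(); req_rate and cur_rate are rate
 * multiples (1X = 1, 4X = 4, ...). */
static unsigned char rate_to_memfree(unsigned req_rate, unsigned cur_rate)
{
	if (cur_rate <= req_rate)
		return MTHCA_RATE_MEMFREE_FULL;

	/* IPD to get from rate X down to no more than Y is (X - 1) / Y. */
	switch ((cur_rate - 1) / req_rate) {
	case 0:  return MTHCA_RATE_MEMFREE_FULL;
	case 1:  return MTHCA_RATE_MEMFREE_HALF;
	case 2:
	case 3:  return MTHCA_RATE_MEMFREE_QUARTER;
	default: return MTHCA_RATE_MEMFREE_EIGHTH;
	}
}

int main(void)
{
	/* A 4X port asked for 1X, 2X and 4X: IPDs 3, 1 and 0. */
	printf("%d %d %d\n", rate_to_memfree(1, 4),
	       rate_to_memfree(2, 4), rate_to_memfree(4, 4));
	return 0;
}

This prints "1 3 0": the quarter-rate code for the 1X request, the half-rate code for 2X, and full rate once the request meets the port rate.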
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 343eca507870..1985b5dfa481 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -965,6 +965,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 	u32 *outbox;
 	u8 field;
 	u16 size;
+	u16 stat_rate;
 	int err;
 
 #define QUERY_DEV_LIM_OUT_SIZE             0x100
@@ -995,6 +996,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 #define QUERY_DEV_LIM_MTU_WIDTH_OFFSET      0x36
 #define QUERY_DEV_LIM_VL_PORT_OFFSET        0x37
 #define QUERY_DEV_LIM_MAX_GID_OFFSET        0x3b
+#define QUERY_DEV_LIM_RATE_SUPPORT_OFFSET   0x3c
 #define QUERY_DEV_LIM_MAX_PKEY_OFFSET       0x3f
 #define QUERY_DEV_LIM_FLAGS_OFFSET          0x44
 #define QUERY_DEV_LIM_RSVD_UAR_OFFSET       0x48
@@ -1086,6 +1088,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 	dev_lim->num_ports = field & 0xf;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
 	dev_lim->max_gids = 1 << (field & 0xf);
+	MTHCA_GET(stat_rate, outbox, QUERY_DEV_LIM_RATE_SUPPORT_OFFSET);
+	dev_lim->stat_rate_support = stat_rate;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
 	dev_lim->max_pkeys = 1 << (field & 0xf);
 	MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index e4ec35c40dd3..2f976f2051d6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -146,6 +146,7 @@ struct mthca_dev_lim {
 	int max_vl;
 	int num_ports;
 	int max_gids;
+	u16 stat_rate_support;
 	int max_pkeys;
 	u32 flags;
 	int reserved_uars;
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index ad52edbefe98..4c1dcb4c1822 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -151,6 +151,7 @@ struct mthca_limits {
 	int      reserved_qps;
 	int      num_srqs;
 	int      max_srq_wqes;
+	int      max_srq_sge;
 	int      reserved_srqs;
 	int      num_eecs;
 	int      reserved_eecs;
@@ -172,6 +173,7 @@ struct mthca_limits {
 	int      reserved_pds;
 	u32      page_size_cap;
 	u32      flags;
+	u16      stat_rate_support;
 	u8       port_width_cap;
 };
 
@@ -353,10 +355,24 @@ struct mthca_dev {
 	struct ib_mad_agent  *send_agent[MTHCA_MAX_PORTS][2];
 	struct ib_ah         *sm_ah[MTHCA_MAX_PORTS];
 	spinlock_t            sm_lock;
+	u8                    rate[MTHCA_MAX_PORTS];
 };
 
-#define mthca_dbg(mdev, format, arg...) \
-	dev_dbg(&mdev->pdev->dev, format, ## arg)
+#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
+extern int mthca_debug_level;
+
+#define mthca_dbg(mdev, format, arg...)					\
+	do {								\
+		if (mthca_debug_level)					\
+			dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
+	} while (0)
+
+#else /* CONFIG_INFINIBAND_MTHCA_DEBUG */
+
+#define mthca_dbg(mdev, format, arg...) do { (void) mdev; } while (0)
+
+#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */
+
 #define mthca_err(mdev, format, arg...) \
 	dev_err(&mdev->pdev->dev, format, ## arg)
 #define mthca_info(mdev, format, arg...) \
@@ -492,6 +508,7 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
 int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		     enum ib_srq_attr_mask attr_mask);
 int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+int mthca_max_srq_sge(struct mthca_dev *dev);
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		     enum ib_event_type event_type);
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
@@ -542,6 +559,8 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 		  struct ib_ud_header *header);
 int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr);
 int mthca_ah_grh_present(struct mthca_ah *ah);
+u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port);
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
 
 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
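
The mthca_dbg() rework above (together with the debug_level parameter added in mthca_main.c further down) trades a compile-time dev_dbg() for a macro gated on a runtime flag, so tracing can be flipped on a live system. A hedged userspace sketch of the same pattern, with fprintf standing in for dev_printk:

#include <stdio.h>

/* Stand-in for mthca_debug_level; in the patch it is exposed through
 * module_param_named() and is therefore settable via sysfs at runtime. */
static int debug_level = 0;

/* Same shape as the patched mthca_dbg(): the do { ... } while (0)
 * wrapper keeps the macro safe in unbraced if/else bodies, and the
 * format/arguments stay compiled (and type-checked) even when off. */
#define dbg(format, arg...)						\
	do {								\
		if (debug_level)					\
			fprintf(stderr, "debug: " format, ## arg);	\
	} while (0)

int main(void)
{
	dbg("not shown, level=%d\n", debug_level);	/* suppressed */
	debug_level = 1;	/* e.g. echo 1 > .../parameters/debug_level */
	dbg("shown, level=%d\n", debug_level);		/* printed */
	return 0;
}

The (void) mdev in the stub branch of the kernel macro serves the same purpose in the other direction: the argument is still "used", so callers compile identically whether or not debugging is configured in.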
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index dfb482eac9a2..4730863ece9a 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -49,6 +49,30 @@ enum {
 	MTHCA_VENDOR_CLASS2 = 0xa
 };
 
+static int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
+{
+	struct ib_port_attr *tprops = NULL;
+	int ret;
+
+	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
+	if (!tprops)
+		return -ENOMEM;
+
+	ret = ib_query_port(&dev->ib_dev, port_num, tprops);
+	if (ret) {
+		printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n",
+		       ret, dev->ib_dev.name, port_num);
+		goto out;
+	}
+
+	dev->rate[port_num - 1] = tprops->active_speed *
+				  ib_width_enum_to_int(tprops->active_width);
+
+out:
+	kfree(tprops);
+	return ret;
+}
+
 static void update_sm_ah(struct mthca_dev *dev,
 			 u8 port_num, u16 lid, u8 sl)
 {
@@ -90,6 +114,7 @@ static void smp_snoop(struct ib_device *ibdev,
 	    mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
 	    mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
 		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
+			mthca_update_rate(to_mdev(ibdev), port_num);
 			update_sm_ah(to_mdev(ibdev), port_num,
 				     be16_to_cpup((__be16 *) (mad->data + 58)),
 				     (*(u8 *) (mad->data + 76)) & 0xf);
@@ -246,6 +271,7 @@ int mthca_create_agents(struct mthca_dev *dev)
 {
 	struct ib_mad_agent *agent;
 	int p, q;
+	int ret;
 
 	spin_lock_init(&dev->sm_lock);
 
@@ -255,11 +281,23 @@ int mthca_create_agents(struct mthca_dev *dev)
 					      q ? IB_QPT_GSI : IB_QPT_SMI,
 					      NULL, 0, send_handler,
 					      NULL, NULL);
-		if (IS_ERR(agent))
+		if (IS_ERR(agent)) {
+			ret = PTR_ERR(agent);
 			goto err;
+		}
 		dev->send_agent[p][q] = agent;
 	}
 
+
+	for (p = 1; p <= dev->limits.num_ports; ++p) {
+		ret = mthca_update_rate(dev, p);
+		if (ret) {
+			mthca_err(dev, "Failed to obtain port %d rate."
+				  " aborting.\n", p);
+			goto err;
+		}
+	}
+
 	return 0;
 
 err:
@@ -268,7 +306,7 @@ err:
 		if (dev->send_agent[p][q])
 			ib_unregister_mad_agent(dev->send_agent[p][q]);
 
-	return PTR_ERR(agent);
+	return ret;
 }
 
 void __devexit mthca_free_agents(struct mthca_dev *dev)
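
mthca_update_rate() above caches each port's total rate as active_speed times the lane count returned by ib_width_enum_to_int(); the static-rate code in mthca_av.c compares requests against this product. A small sketch of that arithmetic, assuming the usual encodings (speed multiplier 1 for SDR at 2.5 Gb/s per lane, 2 for DDR; width 1, 4, 8 or 12 lanes):

#include <stdio.h>

/* Same product the patch stores in dev->rate[port - 1]; the inputs are
 * assumed encodings, as ib_query_port()/ib_width_enum_to_int() would
 * supply them. */
static int port_rate(int active_speed, int width)
{
	return active_speed * width;
}

int main(void)
{
	printf("1X SDR -> %d\n", port_rate(1, 1));	/* 1,  2.5 Gb/s */
	printf("4X SDR -> %d\n", port_rate(1, 4));	/* 4,  10 Gb/s  */
	printf("4X DDR -> %d\n", port_rate(2, 4));	/* 8,  20 Gb/s  */
	return 0;
}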
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 266f347c6707..9b9ff7bff357 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -52,6 +52,14 @@ MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
+#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
+
+int mthca_debug_level = 0;
+module_param_named(debug_level, mthca_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
+
+#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */
+
 #ifdef CONFIG_PCI_MSI
 
 static int msi_x = 0;
@@ -69,6 +77,10 @@ MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");
 
 #endif /* CONFIG_PCI_MSI */
 
+static int tune_pci = 0;
+module_param(tune_pci, int, 0444);
+MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");
+
 static const char mthca_version[] __devinitdata =
 	DRV_NAME ": Mellanox InfiniBand HCA driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -90,6 +102,9 @@ static int __devinit mthca_tune_pci(struct mthca_dev *mdev)
 	int cap;
 	u16 val;
 
+	if (!tune_pci)
+		return 0;
+
 	/* First try to max out Read Byte Count */
 	cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
 	if (cap) {
@@ -176,6 +191,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
 	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
 	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
 	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
+	mdev->limits.max_srq_sge	= mthca_max_srq_sge(mdev);
 	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
@@ -191,6 +207,18 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
 	mdev->limits.port_width_cap     = dev_lim->max_port_width;
 	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
 	mdev->limits.flags              = dev_lim->flags;
+	/*
+	 * For old FW that doesn't return static rate support, use a
+	 * value of 0x3 (only static rate values of 0 or 1 are handled),
+	 * except on Sinai, where even old FW can handle static rate
+	 * values of 2 and 3.
+	 */
+	if (dev_lim->stat_rate_support)
+		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
+	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
+		mdev->limits.stat_rate_support = 0xf;
+	else
+		mdev->limits.stat_rate_support = 0x3;
 
 	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
	   May be doable since hardware supports it for SRQ.
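
In the stat_rate_support mask chosen above, bit N set means hardware static-rate code N is usable: 0x3 covers codes 0-1 and 0xf covers codes 0-3. mthca_get_rate() in the mthca_av.c hunk falls back to code 1 whenever its chosen code is missing from the mask. A standalone sketch of those two checks (plain C; the helper names are illustrative, not from the patch):

#include <stdio.h>

/* Mirror of the old-firmware fallback in mthca_dev_lim() above. */
static unsigned short stat_rate_mask(unsigned short fw_value, int is_sinai)
{
	if (fw_value)
		return fw_value;	/* FW reported its support mask */
	return is_sinai ? 0xf : 0x3;	/* old FW: codes 0-3, else 0-1 */
}

/* Mirror of the final check in mthca_get_rate(): an unsupported code
 * degrades to code 1. */
static unsigned char clamp_rate(unsigned char rate, unsigned short mask)
{
	return (mask & (1 << rate)) ? rate : 1;
}

int main(void)
{
	unsigned short mask = stat_rate_mask(0, 0);	/* old non-Sinai FW */
	printf("mask=0x%x, code 3 -> code %d\n", mask, clamp_rate(3, mask));
	return 0;
}

With old non-Sinai firmware this prints "mask=0x3, code 3 -> code 1", which is also why mthca_rate_to_ib() treats any nonzero rate code as 2.5 Gb/s when stat_rate_support is 0x3.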
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 2c250bc11c33..565a24b1756f 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -106,7 +106,7 @@ static int mthca_query_device(struct ib_device *ibdev,
 	props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
 	props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
 	props->max_srq_wr          = mdev->limits.max_srq_wqes;
-	props->max_srq_sge         = mdev->limits.max_sg;
+	props->max_srq_sge         = mdev->limits.max_srq_sge;
 	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
 	props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
 					IB_ATOMIC_HCA : IB_ATOMIC_NONE;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 2e7f52136965..6676a786d690 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -257,6 +257,8 @@ struct mthca_qp {
 	atomic_t       refcount;
 	u32            qpn;
 	int            is_direct;
+	u8             port; /* for SQP and memfree use only */
+	u8             alt_port; /* for memfree use only */
 	u8             transport;
 	u8             state;
 	u8             atomic_rd_en;
@@ -278,7 +280,6 @@ struct mthca_qp {
 
 struct mthca_sqp {
 	struct mthca_qp qp;
-	int             port;
 	int             pkey_index;
 	u32             qkey;
 	u32             send_psn;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 057c8e6af87b..f37b0e367323 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -248,6 +248,9 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 		return;
 	}
 
+	if (event_type == IB_EVENT_PATH_MIG)
+		qp->port = qp->alt_port;
+
 	event.device      = &dev->ib_dev;
 	event.event       = event_type;
 	event.element.qp  = &qp->ibqp;
@@ -392,10 +395,16 @@ static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
 {
 	memset(ib_ah_attr, 0, sizeof *path);
 	ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+
+	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
+		return;
+
 	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
 	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
 	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
-	ib_ah_attr->static_rate   = path->static_rate & 0x7;
+	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
+						     path->static_rate & 0x7,
+						     ib_ah_attr->port_num);
 	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
 	if (ib_ah_attr->ah_flags) {
 		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
@@ -455,8 +464,10 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
 	qp_attr->cap.max_inline_data = qp->max_inline_data;
 
-	to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
-	to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+	if (qp->transport == RC || qp->transport == UC) {
+		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+	}
 
 	qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
 	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
@@ -484,11 +495,11 @@ out:
 }
 
 static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
-			  struct mthca_qp_path *path)
+			  struct mthca_qp_path *path, u8 port)
 {
 	path->g_mylmc     = ah->src_path_bits & 0x7f;
 	path->rlid        = cpu_to_be16(ah->dlid);
-	path->static_rate = !!ah->static_rate;
+	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);
 
 	if (ah->ah_flags & IB_AH_GRH) {
 		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
@@ -634,7 +645,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
 	if (qp->transport == MLX)
 		qp_context->pri_path.port_pkey |=
-			cpu_to_be32(to_msqp(qp)->port << 24);
+			cpu_to_be32(qp->port << 24);
 	else {
 		if (attr_mask & IB_QP_PORT) {
 			qp_context->pri_path.port_pkey |=
@@ -657,7 +668,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_AV) {
-		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path))
+		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
+				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
 			return -EINVAL;
 
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
@@ -681,7 +693,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 			return -EINVAL;
 		}
 
-		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path))
+		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
+				   attr->alt_ah_attr.port_num))
 			return -EINVAL;
 
 		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
@@ -791,6 +804,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp->atomic_rd_en = attr->qp_access_flags;
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
 		qp->resp_depth = attr->max_dest_rd_atomic;
+	if (attr_mask & IB_QP_PORT)
+		qp->port = attr->port_num;
+	if (attr_mask & IB_QP_ALT_PATH)
+		qp->alt_port = attr->alt_port_num;
 
 	if (is_sqp(dev, qp))
 		store_attrs(to_msqp(qp), attr, attr_mask);
@@ -802,13 +819,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	if (is_qp0(dev, qp)) {
 		if (cur_state != IB_QPS_RTR &&
 		    new_state == IB_QPS_RTR)
-			init_port(dev, to_msqp(qp)->port);
+			init_port(dev, qp->port);
 
 		if (cur_state != IB_QPS_RESET &&
 		    cur_state != IB_QPS_ERR &&
 		    (new_state == IB_QPS_RESET ||
 		     new_state == IB_QPS_ERR))
-			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
+			mthca_CLOSE_IB(dev, qp->port, &status);
 	}
 
 	/*
@@ -1212,6 +1229,9 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 	if (qp->qpn == -1)
 		return -ENOMEM;
 
+	/* initialize port to zero for error-catching. */
+	qp->port = 0;
+
 	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
 				    send_policy, qp);
 	if (err) {
@@ -1261,7 +1281,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	if (err)
 		goto err_out;
 
-	sqp->port = port;
+	sqp->qp.port = port;
 	sqp->qp.qpn = mqpn;
 	sqp->qp.transport = MLX;
 
@@ -1404,10 +1424,10 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 	sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
 	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
 	if (!sqp->qp.ibqp.qp_num)
-		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
+		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
 				   sqp->pkey_index, &pkey);
 	else
-		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
+		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
 				   wr->wr.ud.pkey_index, &pkey);
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 2dd3aea05341..adcaf85355ae 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -192,7 +192,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 
 	/* Sanity check SRQ size before proceeding */
 	if (attr->max_wr  > dev->limits.max_srq_wqes ||
-	    attr->max_sge > dev->limits.max_sg)
+	    attr->max_sge > dev->limits.max_srq_sge)
 		return -EINVAL;
 
 	srq->max      = attr->max_wr;
@@ -660,6 +660,31 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 	return err;
 }
 
+int mthca_max_srq_sge(struct mthca_dev *dev)
+{
+	if (mthca_is_memfree(dev))
+		return dev->limits.max_sg;
+
+	/*
+	 * SRQ allocations are based on powers of 2 for Tavor,
+	 * (although they only need to be multiples of 16 bytes).
+	 *
+	 * Therefore, we need to base the max number of sg entries on
+	 * the largest power of 2 descriptor size that is <= to the
+	 * actual max WQE descriptor size, rather than return the
+	 * max_sg value given by the firmware (which is based on WQE
+	 * sizes as multiples of 16, not powers of 2).
+	 *
+	 * If SRQ implementation is changed for Tavor to be based on
+	 * multiples of 16, the calculation below can be deleted and
+	 * the FW max_sg value returned.
+	 */
+	return min_t(int, dev->limits.max_sg,
+		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
+		      sizeof (struct mthca_next_seg)) /
+		     sizeof (struct mthca_data_seg));
+}
+
 int __devinit mthca_init_srq_table(struct mthca_dev *dev)
 {
 	int err;
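
The Tavor branch of mthca_max_srq_sge() above rounds the maximum WQE descriptor size down to a power of 2 (the 1 << (fls(max_desc_sz) - 1) expression) before subtracting the next-segment header and dividing by the per-SGE size. Worked through as a hedged userspace sketch (the 16-byte segment sizes and the sample firmware limits are illustrative stand-ins for struct mthca_next_seg / struct mthca_data_seg and the QUERY_DEV_LIM output):

#include <stdio.h>

/* Illustrative sizes; the real ones come from the mthca WQE structs. */
#define NEXT_SEG_SIZE 16
#define DATA_SEG_SIZE 16

/* Largest power of 2 <= x, i.e. 1 << (fls(x) - 1) in the patch. */
static int pow2_floor(int x)
{
	int p = 1;

	while (p * 2 <= x)
		p *= 2;
	return p;
}

static int max_srq_sge(int fw_max_sg, int max_desc_sz)
{
	/* Tavor SRQ WQEs come in power-of-2 sizes, so only the
	 * power-of-2 floor of the descriptor size is usable. */
	int sge = (pow2_floor(max_desc_sz) - NEXT_SEG_SIZE) / DATA_SEG_SIZE;

	return sge < fw_max_sg ? sge : fw_max_sg;
}

int main(void)
{
	/* e.g. FW reports max_desc_sz = 1008 and max_sg = 59: only 512
	 * bytes are usable, so (512 - 16) / 16 = 31 SG entries. */
	printf("%d\n", max_srq_sge(59, 1008));
	return 0;
}

This gap between the firmware max_sg and the usable SGE count is why the sanity check in mthca_alloc_srq() now compares against max_srq_sge rather than the raw max_sg.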