 39 files changed, 561 insertions(+), 295 deletions(-)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 1a696f76b616..0bb99bb38809 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -624,17 +624,6 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	 */
 	BUG_ON(iw_event->status);
 
-	/*
-	 * We could be destroying the listening id. If so, ignore this
-	 * upcall.
-	 */
-	spin_lock_irqsave(&listen_id_priv->lock, flags);
-	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
-		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-		goto out;
-	}
-	spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-
 	cm_id = iw_create_cm_id(listen_id_priv->id.device,
 				listen_id_priv->id.cm_handler,
 				listen_id_priv->id.context);
@@ -649,6 +638,19 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 	cm_id_priv->state = IW_CM_STATE_CONN_RECV;
 
+	/*
+	 * We could be destroying the listening id. If so, ignore this
+	 * upcall.
+	 */
+	spin_lock_irqsave(&listen_id_priv->lock, flags);
+	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
+		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+		iw_cm_reject(cm_id, NULL, 0);
+		iw_destroy_cm_id(cm_id);
+		goto out;
+	}
+	spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+
 	ret = alloc_work_entries(cm_id_priv, 3);
 	if (ret) {
 		iw_cm_reject(cm_id, NULL, 0);
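
Worth noting about the reordering above: the listen-state check now runs only after iw_create_cm_id() has succeeded, so the bail-out path must reject and destroy the freshly created id instead of just jumping to out. A minimal stand-alone sketch of that check-after-allocate shape (names hypothetical, not the iwcm code):

    #include <pthread.h>
    #include <stdlib.h>

    enum lstate { LISTENING, DESTROYING };

    struct listener {
            pthread_mutex_t lock;
            enum lstate state;
    };

    struct conn { int id; };

    static struct conn *conn_create(void) { return calloc(1, sizeof(struct conn)); }
    static void conn_destroy(struct conn *c) { free(c); }  /* stands in for reject + destroy */

    static struct conn *handle_request(struct listener *l)
    {
            struct conn *c = conn_create();         /* allocate first ... */

            if (!c)
                    return NULL;
            pthread_mutex_lock(&l->lock);
            if (l->state != LISTENING) {            /* ... then re-check under the lock */
                    pthread_mutex_unlock(&l->lock);
                    conn_destroy(c);                /* undo the allocation on bail-out */
                    return NULL;
            }
            pthread_mutex_unlock(&l->lock);
            return c;
    }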
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 2fe428bba54c..426bb7617ec6 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1842,6 +1842,24 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	}
 }
 
+static bool generate_unmatched_resp(struct ib_mad_private *recv,
+				    struct ib_mad_private *response)
+{
+	if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
+	    recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
+		memcpy(response, recv, sizeof *response);
+		response->header.recv_wc.wc = &response->header.wc;
+		response->header.recv_wc.recv_buf.mad = &response->mad.mad;
+		response->header.recv_wc.recv_buf.grh = &response->grh;
+		response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+		response->mad.mad.mad_hdr.status =
+			cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
+
+		return true;
+	} else {
+		return false;
+	}
+}
 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 				     struct ib_wc *wc)
 {
@@ -1963,6 +1981,9 @@ local:
 	 * or via recv_handler in ib_mad_complete_recv()
 	 */
 	recv = NULL;
+	} else if (generate_unmatched_resp(recv, response)) {
+		agent_send_response(&response->mad.mad, &recv->grh, wc,
+				    port_priv->device, port_num, qp_info->qp->qp_num);
 	}
 
 out:
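
The new generate_unmatched_resp() turns an unhandled request into an error reply by reusing the receive buffer, and it only does so for GET/SET requests, so a response can never trigger another response. The decision rule in isolation, as a userspace model with stand-in constants and types (not the kernel's; wire byte-order conversion elided):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define MGMT_METHOD_GET       0x01    /* stand-in values */
    #define MGMT_METHOD_SET       0x02
    #define MGMT_METHOD_GET_RESP  0x81
    #define MAD_STATUS_BAD_METHOD 0x000c

    struct mad_hdr { uint8_t method; uint16_t status; };

    /* Only requests (GET/SET) earn an error reply; anything else is
     * silently dropped. */
    static bool make_unmatched_resp(const struct mad_hdr *req, struct mad_hdr *resp)
    {
            if (req->method != MGMT_METHOD_GET && req->method != MGMT_METHOD_SET)
                    return false;
            memcpy(resp, req, sizeof(*resp));       /* reuse the request as template */
            resp->method = MGMT_METHOD_GET_RESP;
            resp->status = MAD_STATUS_BAD_METHOD;
            return true;
    }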
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index c61bca30fd2d..83b720ef6c34 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -179,33 +179,36 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 {
 	struct ib_port_attr attr;
 	char *speed = "";
-	int rate;
+	int rate = -1;		/* in deci-Gb/sec */
 	ssize_t ret;
 
 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
 	if (ret)
 		return ret;
 
-	rate = (25 * attr.active_speed) / 10;
-
 	switch (attr.active_speed) {
-	case 2:
+	case IB_SPEED_SDR:
+		rate = 25;
+		break;
+	case IB_SPEED_DDR:
 		speed = " DDR";
+		rate = 50;
 		break;
-	case 4:
+	case IB_SPEED_QDR:
 		speed = " QDR";
+		rate = 100;
 		break;
-	case 8:
+	case IB_SPEED_FDR10:
 		speed = " FDR10";
-		rate = 10;
+		rate = 100;
 		break;
-	case 16:
+	case IB_SPEED_FDR:
 		speed = " FDR";
-		rate = 14;
+		rate = 140;
 		break;
-	case 32:
+	case IB_SPEED_EDR:
 		speed = " EDR";
-		rate = 25;
+		rate = 250;
 		break;
 	}
 
@@ -214,7 +217,7 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 		return -EINVAL;
 
 	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
-		       rate, (attr.active_speed == 1) ? ".5" : "",
+		       rate / 10, rate % 10 ? ".5" : "",
 		       ib_width_enum_to_int(attr.active_width), speed);
 }
 
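
The rewritten rate_show() keeps rates in deci-Gb/sec so that SDR's 2.5 Gb/sec, the only non-integer rate, falls out of integer arithmetic: rate / 10 gives the whole part and rate % 10 selects the ".5" suffix. Runnable on its own:

    #include <stdio.h>

    int main(void)
    {
            /* SDR, DDR, QDR, FDR, EDR in deci-Gb/sec, as in the patch */
            int rates[] = { 25, 50, 100, 140, 250 };

            for (int i = 0; i < 5; i++)
                    printf("%d%s Gb/sec\n", rates[i] / 10, rates[i] % 10 ? ".5" : "");
            return 0;       /* prints 2.5, 5, 10, 14, 25 */
    }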
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5034a87cc72d..5861cdb22b7c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -449,24 +449,6 @@ static void ucma_cleanup_multicast(struct ucma_context *ctx)
 	mutex_unlock(&mut);
 }
 
-static void ucma_cleanup_events(struct ucma_context *ctx)
-{
-	struct ucma_event *uevent, *tmp;
-
-	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
-		if (uevent->ctx != ctx)
-			continue;
-
-		list_del(&uevent->list);
-
-		/* clear incoming connections. */
-		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
-			rdma_destroy_id(uevent->cm_id);
-
-		kfree(uevent);
-	}
-}
-
 static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
 {
 	struct ucma_event *uevent, *tmp;
@@ -480,9 +462,16 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
 	}
 }
 
+/*
+ * We cannot hold file->mut when calling rdma_destroy_id() or we can
+ * deadlock.  We also acquire file->mut in ucma_event_handler(), and
+ * rdma_destroy_id() will wait until all callbacks have completed.
+ */
 static int ucma_free_ctx(struct ucma_context *ctx)
 {
 	int events_reported;
+	struct ucma_event *uevent, *tmp;
+	LIST_HEAD(list);
 
 	/* No new events will be generated after destroying the id. */
 	rdma_destroy_id(ctx->cm_id);
@@ -491,10 +480,20 @@ static int ucma_free_ctx(struct ucma_context *ctx)
 
 	/* Cleanup events not yet reported to the user. */
 	mutex_lock(&ctx->file->mut);
-	ucma_cleanup_events(ctx);
+	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
+		if (uevent->ctx == ctx)
+			list_move_tail(&uevent->list, &list);
+	}
 	list_del(&ctx->list);
 	mutex_unlock(&ctx->file->mut);
 
+	list_for_each_entry_safe(uevent, tmp, &list, list) {
+		list_del(&uevent->list);
+		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
+			rdma_destroy_id(uevent->cm_id);
+		kfree(uevent);
+	}
+
 	events_reported = ctx->events_reported;
 	kfree(ctx);
 	return events_reported;
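
The comment added above names the constraint; the mechanics are the classic drain pattern: unlink matching entries onto a private list under the mutex, then do the heavy teardown (rdma_destroy_id() here) after dropping it. A self-contained kernel-style sketch of the same shape (not the ucma code itself):

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct uevent { struct list_head list; void *ctx; };

    static void drain_ctx(struct list_head *event_list, struct mutex *mut,
                          void *ctx)
    {
            struct uevent *ev, *tmp;
            LIST_HEAD(local);

            /* Under the lock: only unlink, never call anything that can
             * itself take the lock or sleep waiting for callbacks. */
            mutex_lock(mut);
            list_for_each_entry_safe(ev, tmp, event_list, list)
                    if (ev->ctx == ctx)
                            list_move_tail(&ev->list, &local);
            mutex_unlock(mut);

            /* Heavy teardown happens lock-free on the private list. */
            list_for_each_entry_safe(ev, tmp, &local, list) {
                    list_del(&ev->list);
                    kfree(ev);      /* stands in for rdma_destroy_id() + kfree() */
            }
    }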
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 12f923d64e42..07eb3a8067d8 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -94,7 +94,7 @@ static int c2_query_port(struct ib_device *ibdev,
 	props->pkey_tbl_len = 1;
 	props->qkey_viol_cntr = 0;
 	props->active_width = 1;
-	props->active_speed = 1;
+	props->active_speed = IB_SPEED_SDR;
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 37c224fc3ad9..0bdf09aa6f42 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1227,7 +1227,7 @@ static int iwch_query_port(struct ib_device *ibdev,
 	props->gid_tbl_len = 1;
 	props->pkey_tbl_len = 1;
 	props->active_width = 2;
-	props->active_speed = 2;
+	props->active_speed = IB_SPEED_DDR;
 	props->max_msg_sz = -1;
 
 	return 0;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index bea5839d89ee..6de8463f453b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
-		       struct iwch_cq *schp, unsigned long *flag)
+		       struct iwch_cq *schp)
 {
 	int count;
 	int flushed;
@@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
 	atomic_inc(&qhp->refcnt);
-	spin_unlock_irqrestore(&qhp->lock, *flag);
+	spin_unlock(&qhp->lock);
 
 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock_irqsave(&rchp->lock, *flag);
+	spin_lock(&rchp->lock);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&rchp->cq);
 	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&rchp->lock, *flag);
+	spin_unlock(&rchp->lock);
 	if (flushed) {
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+		spin_lock(&rchp->comp_handler_lock);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+		spin_unlock(&rchp->comp_handler_lock);
 	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock_irqsave(&schp->lock, *flag);
+	spin_lock(&schp->lock);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&schp->cq);
 	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&schp->lock, *flag);
+	spin_unlock(&schp->lock);
 	if (flushed) {
-		spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+		spin_lock(&schp->comp_handler_lock);
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-		spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+		spin_unlock(&schp->comp_handler_lock);
 	}
 
 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))
 		wake_up(&qhp->wait);
 
-	spin_lock_irqsave(&qhp->lock, *flag);
+	spin_lock(&qhp->lock);
 }
 
-static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp)
 {
 	struct iwch_cq *rchp, *schp;
 
@@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	if (qhp->ibqp.uobject) {
 		cxio_set_wq_in_error(&qhp->wq);
 		cxio_set_cq_in_error(&rchp->cq);
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+		spin_lock(&rchp->comp_handler_lock);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+		spin_unlock(&rchp->comp_handler_lock);
 		if (schp != rchp) {
 			cxio_set_cq_in_error(&schp->cq);
-			spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+			spin_lock(&schp->comp_handler_lock);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
-			spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+			spin_unlock(&schp->comp_handler_lock);
 		}
 		return;
 	}
-	__flush_qp(qhp, rchp, schp, flag);
+	__flush_qp(qhp, rchp, schp);
 }
 
 
@@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		break;
 	case IWCH_QP_STATE_ERROR:
 		qhp->attr.state = IWCH_QP_STATE_ERROR;
-		flush_qp(qhp, &flag);
+		flush_qp(qhp);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	}
 	switch (attrs->next_state) {
 	case IWCH_QP_STATE_IDLE:
-		flush_qp(qhp, &flag);
+		flush_qp(qhp);
 		qhp->attr.state = IWCH_QP_STATE_IDLE;
 		qhp->attr.llp_stream_handle = NULL;
 		put_ep(&qhp->ep->com);
@@ -1132,7 +1132,7 @@ err:
 	free=1;
 	wake_up(&qhp->wait);
 	BUG_ON(!ep);
-	flush_qp(qhp, &flag);
+	flush_qp(qhp);
 out:
 	spin_unlock_irqrestore(&qhp->lock, flag);
 
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0668bb3472d0..006a35372b7a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1114,7 +1114,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * generated when moving QP to RTS state.
 	 * A TERM message will be sent after QP has moved to RTS state
 	 */
-	if ((ep->mpa_attr.version == 2) &&
+	if ((ep->mpa_attr.version == 2) && peer2peer &&
 	    (ep->mpa_attr.p2p_type != p2p_type)) {
 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
 		rtr_mismatch = 1;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 247fe706e7fa..be1c18f44400 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -329,7 +329,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 	props->gid_tbl_len = 1;
 	props->pkey_tbl_len = 1;
 	props->active_width = 2;
-	props->active_speed = 2;
+	props->active_speed = IB_SPEED_DDR;
 	props->max_msg_sz = -1;
 
 	return 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 73edc3668663..9ed4d2588304 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -233,7 +233,7 @@ int ehca_query_port(struct ib_device *ibdev,
 		props->phys_state = 5;
 		props->state = rblock->state;
 		props->active_width = IB_WIDTH_12X;
-		props->active_speed = 0x1;
+		props->active_speed = IB_SPEED_SDR;
 	}
 
 query_port1:
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index e571e60ecb88..53589000fd07 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -786,7 +786,8 @@ static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
 	spin_lock_init(&cct->task_lock);
 	INIT_LIST_HEAD(&cct->cq_list);
 	init_waitqueue_head(&cct->wait_queue);
-	cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
+	cct->task = kthread_create_on_node(comp_task, cct, cpu_to_node(cpu),
+					   "ehca_comp/%d", cpu);
 
 	return cct->task;
 }
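
kthread_create_on_node() differs from kthread_create() only in placing the new task's stack and task_struct on the given NUMA node, which matters for a per-CPU completion thread like this one. A hedged sketch of how such a helper is commonly paired with kthread_bind() (illustrative, not the ehca pool code; whether the pool binds elsewhere is an assumption):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/topology.h>

    static int comp_task(void *data);       /* assumed thread function */

    static struct task_struct *spawn_on(int cpu, void *cct)
    {
            struct task_struct *t;

            /* allocate the thread's memory on the CPU's own node ... */
            t = kthread_create_on_node(comp_task, cct, cpu_to_node(cpu),
                                       "ehca_comp/%d", cpu);
            /* ... and keep execution there too */
            if (!IS_ERR(t))
                    kthread_bind(t, cpu);
            return t;
    }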
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 43cae84005f0..b781b2cb0624 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -112,7 +112,7 @@ static u32 ehca_encode_hwpage_size(u32 pgsize)
 
 static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
 {
-	return 1UL << ilog2(shca->hca_cap_mr_pgsize);
+	return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
 }
 
 static struct ehca_mr *ehca_mr_new(void)
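
For any nonzero input, 1UL << ilog2(x) and rounddown_pow_of_two(x) compute the same value; the named helper just states the intent. What it computes, in plain runnable C:

    #include <assert.h>
    #include <stdio.h>

    static unsigned long rounddown_pow2(unsigned long x)
    {
            /* clear the lowest set bit until only the highest remains;
             * returns 0 for 0 (the kernel helper is undefined there) */
            while (x & (x - 1))
                    x &= x - 1;
            return x;
    }

    int main(void)
    {
            assert(rounddown_pow2(0x12000) == 0x10000);
            printf("%#lx\n", rounddown_pow2(0x12000));  /* 0x10000 */
            return 0;
    }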
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5ecf38d97269..77c8cb4c5073 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -720,7 +720,8 @@ repoll:
 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
 		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
 		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
-		wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
+		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
+					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
 		if (rdma_port_get_link_layer(wc->qp->device,
 				(*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
 			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
@@ -747,8 +748,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 			break;
 	}
 
-	if (npolled)
-		mlx4_cq_set_ci(&cq->mcq);
+	mlx4_cq_set_ci(&cq->mcq);
 
 	spin_unlock_irqrestore(&cq->lock, flags);
 
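
Two independent fixes here: the consumer index is now written back even when no CQEs were polled, so the hardware always sees a current CI, and the per-entry checksum indication moves from a dedicated csum_ok field into a bit in wc_flags. The same fold shows up in the mthca and IPoIB hunks below. The flag fold in miniature (flag values illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    #define WC_GRH        (1u << 0)     /* stand-ins for IB_WC_GRH, ... */
    #define WC_IP_CSUM_OK (1u << 3)     /* ... IB_WC_IP_CSUM_OK */

    struct wc { uint32_t wc_flags; };

    static void set_csum(struct wc *wc, bool csum_ok)
    {
            /* previously a dedicated wc->csum_ok byte; now one bit in
             * the existing flags word, tested like IB_WC_GRH */
            wc->wc_flags |= csum_ok ? WC_IP_CSUM_OK : 0;
    }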
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 7b445df6a667..75d305629300 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -163,7 +163,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
-	props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;
+	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
 
 out:
 	kfree(in_mad);
@@ -182,12 +182,27 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 }
 
 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
-			      struct ib_port_attr *props,
-			      struct ib_smp *in_mad,
-			      struct ib_smp *out_mad)
+			      struct ib_port_attr *props)
 {
+	struct ib_smp *in_mad = NULL;
+	struct ib_smp *out_mad = NULL;
 	int ext_active_speed;
-	int err;
+	int err = -ENOMEM;
+
+	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+	in_mad->attr_mod = cpu_to_be32(port);
+
+	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL,
+			   in_mad, out_mad);
+	if (err)
+		goto out;
+
 
 	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
 	props->lmc = out_mad->data[34] & 0x7;
@@ -215,34 +230,33 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
 
 		switch (ext_active_speed) {
 		case 1:
-			props->active_speed = 16; /* FDR */
+			props->active_speed = IB_SPEED_FDR;
 			break;
 		case 2:
-			props->active_speed = 32; /* EDR */
+			props->active_speed = IB_SPEED_EDR;
 			break;
 		}
 	}
 
 	/* If reported active speed is QDR, check if is FDR-10 */
-	if (props->active_speed == 4) {
-		if (to_mdev(ibdev)->dev->caps.ext_port_cap[port] &
-		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
-			init_query_mad(in_mad);
-			in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
-			in_mad->attr_mod = cpu_to_be32(port);
-
-			err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
-					   NULL, NULL, in_mad, out_mad);
-			if (err)
-				return err;
+	if (props->active_speed == IB_SPEED_QDR) {
+		init_query_mad(in_mad);
+		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
+		in_mad->attr_mod = cpu_to_be32(port);
 
-			/* Checking LinkSpeedActive for FDR-10 */
-			if (out_mad->data[15] & 0x1)
-				props->active_speed = 8;
-		}
-	}
+		err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
+				   NULL, NULL, in_mad, out_mad);
+		if (err)
+			return err;
 
-	return 0;
+		/* Checking LinkSpeedActive for FDR-10 */
+		if (out_mad->data[15] & 0x1)
+			props->active_speed = IB_SPEED_FDR10;
+	}
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+	return err;
 }
 
 static u8 state_to_phys_state(enum ib_port_state state)
@@ -251,32 +265,42 @@ static u8 state_to_phys_state(enum ib_port_state state)
 }
 
 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
-			       struct ib_port_attr *props,
-			       struct ib_smp *out_mad)
+			       struct ib_port_attr *props)
 {
-	struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
+
+	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
+	struct mlx4_ib_iboe *iboe = &mdev->iboe;
 	struct net_device *ndev;
 	enum ib_mtu tmp;
+	struct mlx4_cmd_mailbox *mailbox;
+	int err = 0;
+
+	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
+			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+			   MLX4_CMD_WRAPPED);
+	if (err)
+		goto out;
 
-	props->active_width = IB_WIDTH_1X;
-	props->active_speed = 4;
+	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
+						IB_WIDTH_4X : IB_WIDTH_1X;
+	props->active_speed = IB_SPEED_QDR;
 	props->port_cap_flags = IB_PORT_CM_SUP;
-	props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
-	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
+	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
+	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
 	props->pkey_tbl_len = 1;
-	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
-	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
 	props->max_mtu = IB_MTU_4096;
-	props->subnet_timeout = 0;
-	props->max_vl_num = out_mad->data[37] >> 4;
-	props->init_type_reply = 0;
+	props->max_vl_num = 2;
 	props->state = IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 	props->active_mtu = IB_MTU_256;
 	spin_lock(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
 	if (!ndev)
-		goto out;
+		goto out_unlock;
 
 	tmp = iboe_get_mtu(ndev->mtu);
 	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
@@ -284,41 +308,23 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
 					IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
-
-out:
+out_unlock:
 	spin_unlock(&iboe->lock);
-	return 0;
+out:
+	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+	return err;
 }
 
 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
 			      struct ib_port_attr *props)
 {
-	struct ib_smp *in_mad = NULL;
-	struct ib_smp *out_mad = NULL;
-	int err = -ENOMEM;
-
-	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
-	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
-	if (!in_mad || !out_mad)
-		goto out;
+	int err;
 
 	memset(props, 0, sizeof *props);
 
-	init_query_mad(in_mad);
-	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
-	in_mad->attr_mod = cpu_to_be32(port);
-
-	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
-	if (err)
-		goto out;
-
 	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
-		ib_link_query_port(ibdev, port, props, in_mad, out_mad) :
-		eth_link_query_port(ibdev, port, props, out_mad);
-
-out:
-	kfree(in_mad);
-	kfree(out_mad);
+		ib_link_query_port(ibdev, port, props) :
+		eth_link_query_port(ibdev, port, props);
 
 	return err;
 }
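
eth_link_query_port() now queries the firmware through a command mailbox instead of a MAD (an IBoE port has no subnet management agent to answer one), which brings the two-label unwind: an error before the spinlock jumps to out and frees only the mailbox, while an error or early exit after it goes through out_unlock and falls through. The shape in isolation (a sketch, not the mlx4 code):

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(lock);

    /* hw_query and have_state are stand-ins for mlx4_cmd_box() and the
     * netdev lookup */
    static int query(int (*hw_query)(void *buf), int (*have_state)(void))
    {
            void *buf = kmalloc(64, GFP_KERNEL);
            int err;

            if (!buf)
                    return -ENOMEM;

            err = hw_query(buf);
            if (err)
                    goto out;               /* lock not taken yet */

            spin_lock(&lock);
            if (!have_state())
                    goto out_unlock;        /* like the !ndev case */
            /* ... read state under the lock ... */
    out_unlock:
            spin_unlock(&lock);
    out:
            kfree(buf);                     /* each label undoes one step */
            return err;
    }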
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index aa2aefa4236c..3a7848966627 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1884,6 +1884,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		wmb();
 
 		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
+			*bad_wr = wr;
 			err = -EINVAL;
 			goto out;
 		}
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 53157b86a1ba..40ba83338155 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -643,7 +643,8 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
 		entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
 		checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
 				((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
-		entry->csum_ok = (cqe->sl_ipok & 1 && checksum == 0xffff);
+		entry->wc_flags |= (cqe->sl_ipok & 1 && checksum == 0xffff) ?
+				IB_WC_IP_CSUM_OK : 0;
 	}
 
 	entry->status = IB_WC_SUCCESS;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index a4972abedef1..da2c67db5ebb 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -338,18 +338,21 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
 	case IETF_MPA_V2: {
 		u16 ird_size;
 		u16 ord_size;
+		u16 rtr_ctrl_ird;
+		u16 rtr_ctrl_ord;
+
 		mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
 		mpa_hdr_len += IETF_RTR_MSG_SIZE;
 		cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE;
 		rtr_msg = &mpa_v2_frame->rtr_msg;
 
 		/* parse rtr message */
-		rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird);
-		rtr_msg->ctrl_ord = ntohs(rtr_msg->ctrl_ord);
-		ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD;
-		ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD;
+		rtr_ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+		rtr_ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+		ird_size = rtr_ctrl_ird & IETF_NO_IRD_ORD;
+		ord_size = rtr_ctrl_ord & IETF_NO_IRD_ORD;
 
-		if (!(rtr_msg->ctrl_ird & IETF_PEER_TO_PEER)) {
+		if (!(rtr_ctrl_ird & IETF_PEER_TO_PEER)) {
 			/* send reset */
 			return -EINVAL;
 		}
@@ -370,9 +373,9 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
 			}
 		}
 
-		if (rtr_msg->ctrl_ord & IETF_RDMA0_READ) {
+		if (rtr_ctrl_ord & IETF_RDMA0_READ) {
 			cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
-		} else if (rtr_msg->ctrl_ord & IETF_RDMA0_WRITE) {
+		} else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) {
 			cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
 		} else {	/* Not supported RDMA0 operation */
 			return -EINVAL;
@@ -543,6 +546,8 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
 {
 	struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
 	struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+	u16 ctrl_ird;
+	u16 ctrl_ord;
 
 	/* initialize the upper 5 bytes of the frame */
 	build_mpa_v1(cm_node, start_addr, mpa_key);
@@ -550,31 +555,31 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
 	mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
 
 	/* initialize RTR msg */
-	rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+	ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
 			    IETF_NO_IRD_ORD : cm_node->ird_size;
-	rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+	ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
 			    IETF_NO_IRD_ORD : cm_node->ord_size;
 
-	rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
-	rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+	ctrl_ird |= IETF_PEER_TO_PEER;
+	ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
 	switch (mpa_key) {
 	case MPA_KEY_REQUEST:
-		rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
-		rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+		ctrl_ord |= IETF_RDMA0_WRITE;
+		ctrl_ord |= IETF_RDMA0_READ;
 		break;
 	case MPA_KEY_REPLY:
 		switch (cm_node->send_rdma0_op) {
 		case SEND_RDMA_WRITE_ZERO:
-			rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+			ctrl_ord |= IETF_RDMA0_WRITE;
 			break;
 		case SEND_RDMA_READ_ZERO:
-			rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+			ctrl_ord |= IETF_RDMA0_READ;
 			break;
 		}
 	}
-	rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
-	rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+	rtr_msg->ctrl_ird = htons(ctrl_ird);
+	rtr_msg->ctrl_ord = htons(ctrl_ord);
 }
 
 /**
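
The nes change is an endianness-correctness fix: ntohs()-converting ctrl_ird/ctrl_ord in place leaves the packet buffer half host-order (and defeats sparse-style __be16 checking), so both parse_mpa() and build_mpa_v2() now work on host-order locals and touch the wire fields exactly once. A stand-alone illustration (field layout and masks are stand-ins):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rtr_msg { uint16_t ctrl_ird; uint16_t ctrl_ord; }; /* big-endian on the wire */

    #define PEER_TO_PEER 0x8000
    #define NO_IRD_ORD   0x3fff

    int main(void)
    {
            struct rtr_msg wire = { .ctrl_ird = htons(PEER_TO_PEER | 0x10) };
            uint16_t ctrl_ird = ntohs(wire.ctrl_ird);   /* host-order copy */

            /* wire.ctrl_ird is untouched, so the buffer can be parsed again */
            printf("p2p=%d ird=%u\n",
                   !!(ctrl_ird & PEER_TO_PEER), ctrl_ird & NO_IRD_ORD);
            return 0;
    }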
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 0927b5cc65d3..8b8812de4b5c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -597,7 +597,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
 	props->pkey_tbl_len = 1;
 	props->qkey_viol_cntr = 0;
 	props->active_width = IB_WIDTH_4X;
-	props->active_speed = 1;
+	props->active_speed = IB_SPEED_SDR;
 	props->max_msg_sz = 0x80000000;
 
 	return 0;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index b881bdc401f5..6b811e3e8bd1 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -427,6 +427,14 @@ struct qib_verbs_txreq {
 /* how often we check for packet activity for "power on hours (in seconds) */
 #define ACTIVITY_TIMER 5
 
+#define MAX_NAME_SIZE 64
+struct qib_msix_entry {
+	struct msix_entry msix;
+	void *arg;
+	char name[MAX_NAME_SIZE];
+	cpumask_var_t mask;
+};
+
 /* Below is an opaque struct. Each chip (device) can maintain
  * private data needed for its operation, but not germane to the
  * rest of the driver.  For convenience, we define another that
@@ -1355,7 +1363,7 @@ int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
 int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
 		    const struct pci_device_id *);
 void qib_pcie_ddcleanup(struct qib_devdata *);
-int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
+int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *);
 int qib_reinit_intr(struct qib_devdata *);
 void qib_enable_intx(struct pci_dev *);
 void qib_nomsi(struct qib_devdata *);
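
The new qib_msix_entry bundles everything with per-vector lifetime into one element, replacing the parallel msix_entries[]/msix_arg[] arrays used in the qib_iba7322.c hunks below; in particular, request_irq() keeps the name pointer it is given, so the name needs storage that lives as long as the irq. A sketch of the idea (names illustrative, not the qib definitions):

    #include <linux/cpumask.h>
    #include <linux/pci.h>

    struct my_msix_entry {
            struct msix_entry msix; /* vector + entry index for the PCI core */
            void *arg;              /* handler argument, needed again at free_irq() */
            char name[64];          /* stable storage request_irq() can point at */
            cpumask_var_t mask;     /* affinity hint, freed with the entry */
    };

    /* before: two kmallocs (msix_entry array + void * array) that must
     * stay index-aligned; after: one array that can't fall out of sync */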
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 41e92089e41b..060b96064469 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -541,8 +541,7 @@ struct qib_chip_specific {
 	u32 lastbuf_for_pio;
 	u32 stay_in_freeze;
 	u32 recovery_ports_initted;
-	struct msix_entry *msix_entries;
-	void **msix_arg;
+	struct qib_msix_entry *msix_entries;
 	unsigned long *sendchkenable;
 	unsigned long *sendgrhchk;
 	unsigned long *sendibchk;
@@ -639,24 +638,24 @@ static struct {
 	int lsb;
 	int port; /* 0 if not port-specific, else port # */
 } irq_table[] = {
-	{ QIB_DRV_NAME, qib_7322intr, -1, 0 },
-	{ QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
+	{ "", qib_7322intr, -1, 0 },
+	{ " (buf avail)", qib_7322bufavail,
 		SYM_LSB(IntStatus, SendBufAvail), 0 },
-	{ QIB_DRV_NAME " (sdma 0)", sdma_intr,
+	{ " (sdma 0)", sdma_intr,
 		SYM_LSB(IntStatus, SDmaInt_0), 1 },
-	{ QIB_DRV_NAME " (sdma 1)", sdma_intr,
+	{ " (sdma 1)", sdma_intr,
 		SYM_LSB(IntStatus, SDmaInt_1), 2 },
-	{ QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
+	{ " (sdmaI 0)", sdma_idle_intr,
 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
-	{ QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
+	{ " (sdmaI 1)", sdma_idle_intr,
 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
-	{ QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
+	{ " (sdmaP 0)", sdma_progress_intr,
 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
-	{ QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
+	{ " (sdmaP 1)", sdma_progress_intr,
 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
-	{ QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
+	{ " (sdmaC 0)", sdma_cleanup_intr,
 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
-	{ QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
+	{ " (sdmaC 1)", sdma_cleanup_intr,
 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
 };
 
@@ -2567,9 +2566,13 @@ static void qib_7322_nomsix(struct qib_devdata *dd)
 		int i;
 
 		dd->cspec->num_msix_entries = 0;
-		for (i = 0; i < n; i++)
-			free_irq(dd->cspec->msix_entries[i].vector,
-				 dd->cspec->msix_arg[i]);
+		for (i = 0; i < n; i++) {
+			irq_set_affinity_hint(
+				dd->cspec->msix_entries[i].msix.vector, NULL);
+			free_cpumask_var(dd->cspec->msix_entries[i].mask);
+			free_irq(dd->cspec->msix_entries[i].msix.vector,
+				 dd->cspec->msix_entries[i].arg);
+		}
 		qib_nomsix(dd);
 	}
 	/* make sure no MSIx interrupts are left pending */
@@ -2597,7 +2600,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
 	kfree(dd->cspec->sendgrhchk);
 	kfree(dd->cspec->sendibchk);
 	kfree(dd->cspec->msix_entries);
-	kfree(dd->cspec->msix_arg);
 	for (i = 0; i < dd->num_pports; i++) {
 		unsigned long flags;
 		u32 mask = QSFP_GPIO_MOD_PRS_N |
@@ -3070,6 +3072,8 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
 	int ret, i, msixnum;
 	u64 redirect[6];
 	u64 mask;
+	const struct cpumask *local_mask;
+	int firstcpu, secondcpu = 0, currrcvcpu = 0;
 
 	if (!dd->num_pports)
 		return;
@@ -3118,13 +3122,28 @@ try_intx:
 	memset(redirect, 0, sizeof redirect);
 	mask = ~0ULL;
 	msixnum = 0;
+	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
+	firstcpu = cpumask_first(local_mask);
+	if (firstcpu >= nr_cpu_ids ||
+			cpumask_weight(local_mask) == num_online_cpus()) {
+		local_mask = topology_core_cpumask(0);
+		firstcpu = cpumask_first(local_mask);
+	}
+	if (firstcpu < nr_cpu_ids) {
+		secondcpu = cpumask_next(firstcpu, local_mask);
+		if (secondcpu >= nr_cpu_ids)
+			secondcpu = firstcpu;
+		currrcvcpu = secondcpu;
+	}
 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
 		irq_handler_t handler;
-		const char *name;
 		void *arg;
 		u64 val;
 		int lsb, reg, sh;
 
+		dd->cspec->msix_entries[msixnum].
+			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
+			= '\0';
 		if (i < ARRAY_SIZE(irq_table)) {
 			if (irq_table[i].port) {
 				/* skip if for a non-configured port */
@@ -3135,7 +3154,11 @@ try_intx:
 			arg = dd;
 			lsb = irq_table[i].lsb;
 			handler = irq_table[i].handler;
-			name = irq_table[i].name;
+			snprintf(dd->cspec->msix_entries[msixnum].name,
+				sizeof(dd->cspec->msix_entries[msixnum].name)
+				 - 1,
+				QIB_DRV_NAME "%d%s", dd->unit,
+				irq_table[i].name);
 		} else {
 			unsigned ctxt;
 
@@ -3148,23 +3171,28 @@ try_intx:
 				continue;
 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
 			handler = qib_7322pintr;
-			name = QIB_DRV_NAME " (kctx)";
+			snprintf(dd->cspec->msix_entries[msixnum].name,
+				sizeof(dd->cspec->msix_entries[msixnum].name)
+				 - 1,
+				QIB_DRV_NAME "%d (kctx)", dd->unit);
 		}
-		ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
-				  handler, 0, name, arg);
+		ret = request_irq(
+			dd->cspec->msix_entries[msixnum].msix.vector,
+			handler, 0, dd->cspec->msix_entries[msixnum].name,
+			arg);
 		if (ret) {
 			/*
 			 * Shouldn't happen since the enable said we could
 			 * have as many as we are trying to setup here.
 			 */
 			qib_dev_err(dd, "Couldn't setup MSIx "
 				    "interrupt (vec=%d, irq=%d): %d\n", msixnum,
-				    dd->cspec->msix_entries[msixnum].vector,
+				    dd->cspec->msix_entries[msixnum].msix.vector,
 				    ret);
 			qib_7322_nomsix(dd);
 			goto try_intx;
 		}
-		dd->cspec->msix_arg[msixnum] = arg;
+		dd->cspec->msix_entries[msixnum].arg = arg;
 		if (lsb >= 0) {
 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
@@ -3174,6 +3202,25 @@ try_intx:
 		}
 		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
 			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
+		if (firstcpu < nr_cpu_ids &&
+			zalloc_cpumask_var(
+				&dd->cspec->msix_entries[msixnum].mask,
+				GFP_KERNEL)) {
+			if (handler == qib_7322pintr) {
+				cpumask_set_cpu(currrcvcpu,
+					dd->cspec->msix_entries[msixnum].mask);
+				currrcvcpu = cpumask_next(currrcvcpu,
+					local_mask);
+				if (currrcvcpu >= nr_cpu_ids)
+					currrcvcpu = secondcpu;
+			} else {
+				cpumask_set_cpu(firstcpu,
+					dd->cspec->msix_entries[msixnum].mask);
+			}
+			irq_set_affinity_hint(
+				dd->cspec->msix_entries[msixnum].msix.vector,
+				dd->cspec->msix_entries[msixnum].mask);
+		}
 		msixnum++;
 	}
 	/* Initialize the vector mapping */
@@ -3365,7 +3412,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
 	if (msix_entries) {
 		/* restore the MSIx vector address and data if saved above */
 		for (i = 0; i < msix_entries; i++) {
-			dd->cspec->msix_entries[i].entry = i;
+			dd->cspec->msix_entries[i].msix.entry = i;
 			if (!msix_vecsave || !msix_vecsave[2 * i])
 				continue;
 			qib_write_kreg(dd, 2 * i +
@@ -6865,15 +6912,13 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
 
 	tabsize = actual_cnt;
 	dd->cspec->msix_entries = kmalloc(tabsize *
-			sizeof(struct msix_entry), GFP_KERNEL);
-	dd->cspec->msix_arg = kmalloc(tabsize *
-			sizeof(void *), GFP_KERNEL);
-	if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
+			sizeof(struct qib_msix_entry), GFP_KERNEL);
+	if (!dd->cspec->msix_entries) {
 		qib_dev_err(dd, "No memory for MSIx table\n");
 		tabsize = 0;
 	}
 	for (i = 0; i < tabsize; i++)
-		dd->cspec->msix_entries[i].entry = i;
+		dd->cspec->msix_entries[i].msix.entry = i;
 
 	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
 		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
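
The affinity logic added above spreads vectors over the CPUs near the device: the first local CPU takes the non-receive vectors, and receive-context vectors rotate over the remaining ones via irq_set_affinity_hint(). A condensed sketch of the policy (not the driver code; the wrap-around target is simplified):

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>
    #include <linux/types.h>

    static void hint_affinity(unsigned int irq, bool is_rcv,
                              const struct cpumask *local, int firstcpu,
                              int *currcpu, cpumask_var_t mask)
    {
            cpumask_clear(mask);
            if (is_rcv) {
                    /* receive vectors: round-robin over the local CPUs */
                    cpumask_set_cpu(*currcpu, mask);
                    *currcpu = cpumask_next(*currcpu, local);
                    if (*currcpu >= nr_cpu_ids)
                            *currcpu = cpumask_next(firstcpu, local);
            } else {
                    /* everything else piles onto the first local CPU */
                    cpumask_set_cpu(firstcpu, mask);
            }
            irq_set_affinity_hint(irq, mask);
    }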
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 3b3745f261f0..c4ff788823b5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -433,7 +433,6 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	struct qib_pportdata *ppd;
 	struct qib_ibport *ibp;
 	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-	u16 lid;
 	u8 mtu;
 	int ret;
 	u32 state;
@@ -469,8 +468,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		       ibp->mkeyprot == 1))
 		pip->mkey = ibp->mkey;
 	pip->gid_prefix = ibp->gid_prefix;
-	lid = ppd->lid;
-	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
+	pip->lid = cpu_to_be16(ppd->lid);
 	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
 	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
 	/* pip->diag_code; */
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 0fde788e1100..790646ef5106 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c | |||
@@ -194,11 +194,24 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd) | |||
194 | } | 194 | } |
195 | 195 | ||
196 | static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, | 196 | static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, |
197 | struct msix_entry *msix_entry) | 197 | struct qib_msix_entry *qib_msix_entry) |
198 | { | 198 | { |
199 | int ret; | 199 | int ret; |
200 | u32 tabsize = 0; | 200 | u32 tabsize = 0; |
201 | u16 msix_flags; | 201 | u16 msix_flags; |
202 | struct msix_entry *msix_entry; | ||
203 | int i; | ||
204 | |||
205 | /* We can't pass qib_msix_entry array to qib_msix_setup | ||
206 | * so use a dummy msix_entry array and copy the allocated | ||
207 | * irq back to the qib_msix_entry array. */ | ||
208 | msix_entry = kmalloc(*msixcnt * sizeof(*msix_entry), GFP_KERNEL); | ||
209 | if (!msix_entry) { | ||
210 | ret = -ENOMEM; | ||
211 | goto do_intx; | ||
212 | } | ||
213 | for (i = 0; i < *msixcnt; i++) | ||
214 | msix_entry[i] = qib_msix_entry[i].msix; | ||
202 | 215 | ||
203 | pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags); | 216 | pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags); |
204 | tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE); | 217 | tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE); |
@@ -209,11 +222,15 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, | |||
209 | tabsize = ret; | 222 | tabsize = ret; |
210 | ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize); | 223 | ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize); |
211 | } | 224 | } |
225 | do_intx: | ||
212 | if (ret) { | 226 | if (ret) { |
213 | qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, " | 227 | qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, " |
214 | "falling back to INTx\n", tabsize, ret); | 228 | "falling back to INTx\n", tabsize, ret); |
215 | tabsize = 0; | 229 | tabsize = 0; |
216 | } | 230 | } |
231 | for (i = 0; i < tabsize; i++) | ||
232 | qib_msix_entry[i].msix = msix_entry[i]; | ||
233 | kfree(msix_entry); | ||
217 | *msixcnt = tabsize; | 234 | *msixcnt = tabsize; |
218 | 235 | ||
219 | if (ret) | 236 | if (ret) |
@@ -251,7 +268,7 @@ static int qib_msi_setup(struct qib_devdata *dd, int pos) | |||
251 | } | 268 | } |
252 | 269 | ||
253 | int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent, | 270 | int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent, |
254 | struct msix_entry *entry) | 271 | struct qib_msix_entry *entry) |
255 | { | 272 | { |
256 | u16 linkstat, speed; | 273 | u16 linkstat, speed; |
257 | int pos = 0, pose, ret = 1; | 274 | int pos = 0, pose, ret = 1; |
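Because pci_enable_msix() only accepts a bare struct msix_entry array, qib_msix_setup() now marshals the wrapped entries through a temporary array in both directions. A condensed sketch of the copy-in/copy-out pattern (helper name and simplified error handling are illustrative):

    #include <linux/pci.h>
    #include <linux/slab.h>

    static int enable_msix_wrapped(struct pci_dev *pdev,
                                   struct qib_msix_entry *ents, int n)
    {
            struct msix_entry *tmp;
            int i, ret;

            tmp = kmalloc(n * sizeof(*tmp), GFP_KERNEL);
            if (!tmp)
                    return -ENOMEM;                 /* caller falls back to INTx */
            for (i = 0; i < n; i++)
                    tmp[i] = ents[i].msix;          /* copy in */
            ret = pci_enable_msix(pdev, tmp, n);    /* legacy API of this era */
            if (!ret) {
                    for (i = 0; i < n; i++)
                            ents[i].msix = tmp[i];  /* copy assigned vectors back */
            }
            kfree(tmp);
            return ret;
    }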
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 894afac26f3b..765b4cbaa020 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -2048,7 +2048,6 @@ send_last: | |||
2048 | wc.pkey_index = 0; | 2048 | wc.pkey_index = 0; |
2049 | wc.dlid_path_bits = 0; | 2049 | wc.dlid_path_bits = 0; |
2050 | wc.port_num = 0; | 2050 | wc.port_num = 0; |
2051 | wc.csum_ok = 0; | ||
2052 | /* Signal completion event if the solicited bit is set. */ | 2051 | /* Signal completion event if the solicited bit is set. */ |
2053 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 2052 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
2054 | (ohdr->bth[0] & | 2053 | (ohdr->bth[0] & |
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 847e7afdfd94..7ce2ac2ed219 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
@@ -422,7 +422,6 @@ last_imm: | |||
422 | wc.pkey_index = 0; | 422 | wc.pkey_index = 0; |
423 | wc.dlid_path_bits = 0; | 423 | wc.dlid_path_bits = 0; |
424 | wc.port_num = 0; | 424 | wc.port_num = 0; |
425 | wc.csum_ok = 0; | ||
426 | /* Signal completion event if the solicited bit is set. */ | 425 | /* Signal completion event if the solicited bit is set. */ |
427 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 426 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
428 | (ohdr->bth[0] & | 427 | (ohdr->bth[0] & |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 4115be54ba3b..5c1bc995e560 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -296,7 +296,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
296 | dev->stats.rx_bytes += skb->len; | 296 | dev->stats.rx_bytes += skb->len; |
297 | 297 | ||
298 | skb->dev = dev; | 298 | skb->dev = dev; |
299 | if ((dev->features & NETIF_F_RXCSUM) && likely(wc->csum_ok)) | 299 | if ((dev->features & NETIF_F_RXCSUM) && |
300 | likely(wc->wc_flags & IB_WC_IP_CSUM_OK)) | ||
300 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 301 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
301 | 302 | ||
302 | napi_gro_receive(&priv->napi, skb); | 303 | napi_gro_receive(&priv->napi, skb); |
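The qib_rc.c/qib_uc.c deletions and this ipoib hunk are two sides of one API change: the dedicated csum_ok field in struct ib_wc is folded into the wc_flags bitmask as IB_WC_IP_CSUM_OK (see the ib_verbs.h hunk at the end of this series). A minimal sketch of both sides, assuming <rdma/ib_verbs.h>:

    /* Provider side: report checksum validation via the flag bit. */
    static void fill_wc_csum(struct ib_wc *wc, bool hw_csum_ok)
    {
            if (hw_csum_ok)
                    wc->wc_flags |= IB_WC_IP_CSUM_OK; /* replaces wc->csum_ok = 1 */
    }

    /* Consumer side: mirror of the ipoib test above. */
    static bool wc_csum_ok(const struct ib_wc *wc)
    {
            return wc->wc_flags & IB_WC_IP_CSUM_OK;
    }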
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 9a43cb07f294..db43b3117168 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -364,6 +364,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, | |||
364 | } | 364 | } |
365 | ib_conn = ep->dd_data; | 365 | ib_conn = ep->dd_data; |
366 | 366 | ||
367 | if (iser_alloc_rx_descriptors(ib_conn)) | ||
368 | return -ENOMEM; | ||
369 | |||
367 | /* binds the iSER connection retrieved from the previously | 370 | /* binds the iSER connection retrieved from the previously |
368 | * connected ep_handle to the iSCSI layer connection. exchanges | 371 | * connected ep_handle to the iSCSI layer connection. exchanges |
369 | * connection pointers */ | 372 | * connection pointers */ |
@@ -398,19 +401,6 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) | |||
398 | iser_conn->ib_conn = NULL; | 401 | iser_conn->ib_conn = NULL; |
399 | } | 402 | } |
400 | 403 | ||
401 | static int | ||
402 | iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) | ||
403 | { | ||
404 | struct iscsi_conn *conn = cls_conn->dd_data; | ||
405 | int err; | ||
406 | |||
407 | err = iser_conn_set_full_featured_mode(conn); | ||
408 | if (err) | ||
409 | return err; | ||
410 | |||
411 | return iscsi_conn_start(cls_conn); | ||
412 | } | ||
413 | |||
414 | static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) | 404 | static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) |
415 | { | 405 | { |
416 | struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); | 406 | struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); |
@@ -724,7 +714,7 @@ static struct iscsi_transport iscsi_iser_transport = { | |||
724 | .get_conn_param = iscsi_conn_get_param, | 714 | .get_conn_param = iscsi_conn_get_param, |
725 | .get_ep_param = iscsi_iser_get_ep_param, | 715 | .get_ep_param = iscsi_iser_get_ep_param, |
726 | .get_session_param = iscsi_session_get_param, | 716 | .get_session_param = iscsi_session_get_param, |
727 | .start_conn = iscsi_iser_conn_start, | 717 | .start_conn = iscsi_conn_start, |
728 | .stop_conn = iscsi_iser_conn_stop, | 718 | .stop_conn = iscsi_iser_conn_stop, |
729 | /* iscsi host params */ | 719 | /* iscsi host params */ |
730 | .get_host_param = iscsi_host_get_param, | 720 | .get_host_param = iscsi_host_get_param, |
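Net effect of the iscsi_iser.c changes: RX descriptor allocation moves forward to conn_bind, which lets the custom start_conn wrapper go away in favor of the stock iscsi_conn_start(). A sketch of the resulting bring-up order (paraphrased call flow, not verbatim driver code):

    /*
     * iscsi_iser_conn_bind()
     *     iser_alloc_rx_descriptors()   - moved here from the old start path
     *     ... bind ep_handle to the iSCSI connection ...
     * iscsi_conn_start()                - generic libiscsi start, no wrapper
     * iser_send_control()               - on the final login PDU:
     *     iser_post_recvl()
     *     iser_post_rx_bufs()           - posts ISER_MIN_POSTED_RX at FFP
     */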
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index db7ea3704da7..296be431a0e9 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -366,4 +366,5 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, | |||
366 | void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); | 366 | void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); |
367 | int iser_initialize_task_headers(struct iscsi_task *task, | 367 | int iser_initialize_task_headers(struct iscsi_task *task, |
368 | struct iser_tx_desc *tx_desc); | 368 | struct iser_tx_desc *tx_desc); |
369 | int iser_alloc_rx_descriptors(struct iser_conn *ib_conn); | ||
369 | #endif | 370 | #endif |
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index a607542fc796..a00ccd1ca333 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
@@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn *ib_conn, | |||
170 | } | 170 | } |
171 | 171 | ||
172 | 172 | ||
173 | static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) | 173 | int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) |
174 | { | 174 | { |
175 | int i, j; | 175 | int i, j; |
176 | u64 dma_addr; | 176 | u64 dma_addr; |
@@ -220,18 +220,6 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn) | |||
220 | struct iser_rx_desc *rx_desc; | 220 | struct iser_rx_desc *rx_desc; |
221 | struct iser_device *device = ib_conn->device; | 221 | struct iser_device *device = ib_conn->device; |
222 | 222 | ||
223 | if (ib_conn->login_buf) { | ||
224 | if (ib_conn->login_req_dma) | ||
225 | ib_dma_unmap_single(device->ib_device, | ||
226 | ib_conn->login_req_dma, | ||
227 | ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); | ||
228 | if (ib_conn->login_resp_dma) | ||
229 | ib_dma_unmap_single(device->ib_device, | ||
230 | ib_conn->login_resp_dma, | ||
231 | ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); | ||
232 | kfree(ib_conn->login_buf); | ||
233 | } | ||
234 | |||
235 | if (!ib_conn->rx_descs) | 223 | if (!ib_conn->rx_descs) |
236 | return; | 224 | return; |
237 | 225 | ||
@@ -242,23 +230,24 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn) | |||
242 | kfree(ib_conn->rx_descs); | 230 | kfree(ib_conn->rx_descs); |
243 | } | 231 | } |
244 | 232 | ||
245 | /** | 233 | static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) |
246 | * iser_conn_set_full_featured_mode - (iSER API) | ||
247 | */ | ||
248 | int iser_conn_set_full_featured_mode(struct iscsi_conn *conn) | ||
249 | { | 234 | { |
250 | struct iscsi_iser_conn *iser_conn = conn->dd_data; | 235 | struct iscsi_iser_conn *iser_conn = conn->dd_data; |
251 | 236 | ||
252 | iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX); | 237 | iser_dbg("req op %x flags %x\n", req->opcode, req->flags); |
253 | 238 | /* check if this is the last login - going to full feature phase */ | |
254 | /* Check that there is no posted recv or send buffers left - */ | 239 | if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE) |
255 | /* they must be consumed during the login phase */ | 240 | return 0; |
256 | BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0); | ||
257 | BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); | ||
258 | 241 | ||
259 | if (iser_alloc_rx_descriptors(iser_conn->ib_conn)) | 242 | /* |
260 | return -ENOMEM; | 243 | * Check that there is one posted recv buffer (for the last login |
244 | * response) and no posted send buffers left - they must have been | ||
245 | * consumed during previous login phases. | ||
246 | */ | ||
247 | WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1); | ||
248 | WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); | ||
261 | 249 | ||
250 | iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX); | ||
262 | /* Initial post receive buffers */ | 251 | /* Initial post receive buffers */ |
263 | if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX)) | 252 | if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX)) |
264 | return -ENOMEM; | 253 | return -ENOMEM; |
@@ -438,6 +427,9 @@ int iser_send_control(struct iscsi_conn *conn, | |||
438 | err = iser_post_recvl(iser_conn->ib_conn); | 427 | err = iser_post_recvl(iser_conn->ib_conn); |
439 | if (err) | 428 | if (err) |
440 | goto send_control_error; | 429 | goto send_control_error; |
430 | err = iser_post_rx_bufs(conn, task->hdr); | ||
431 | if (err) | ||
432 | goto send_control_error; | ||
441 | } | 433 | } |
442 | 434 | ||
443 | err = iser_post_send(iser_conn->ib_conn, mdesc); | 435 | err = iser_post_send(iser_conn->ib_conn, mdesc); |
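The full-feature test in iser_post_rx_bufs() uses the all-bits-set idiom: ISCSI_FULL_FEATURE_PHASE is the two-bit next-stage value 3 in the login flags, so the code must compare against the mask itself rather than test for non-zero, which would also match intermediate stages. Stripped to the idiom (mask name illustrative):

    #define NEXT_STAGE_MASK 0x3 /* illustrative two-bit stage field */

    static bool stage_is_full_feature(u8 flags)
    {
            return (flags & NEXT_STAGE_MASK) == NEXT_STAGE_MASK;
    }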
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index e28877c4ce15..14224ba44fd8 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -274,6 +274,18 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id) | |||
274 | ib_conn->cma_id = NULL; | 274 | ib_conn->cma_id = NULL; |
275 | kfree(ib_conn->page_vec); | 275 | kfree(ib_conn->page_vec); |
276 | 276 | ||
277 | if (ib_conn->login_buf) { | ||
278 | if (ib_conn->login_req_dma) | ||
279 | ib_dma_unmap_single(ib_conn->device->ib_device, | ||
280 | ib_conn->login_req_dma, | ||
281 | ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); | ||
282 | if (ib_conn->login_resp_dma) | ||
283 | ib_dma_unmap_single(ib_conn->device->ib_device, | ||
284 | ib_conn->login_resp_dma, | ||
285 | ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); | ||
286 | kfree(ib_conn->login_buf); | ||
287 | } | ||
288 | |||
277 | return 0; | 289 | return 0; |
278 | } | 290 | } |
279 | 291 | ||
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 2b73d43cd691..ebe33d960d77 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -3450,7 +3450,7 @@ static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg) | |||
3450 | 3450 | ||
3451 | nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL); | 3451 | nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL); |
3452 | if (!nacl) { | 3452 | if (!nacl) { |
3453 | printk(KERN_ERR "Unable to alocate struct srpt_node_acl\n"); | 3453 | printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n"); |
3454 | return NULL; | 3454 | return NULL; |
3455 | } | 3455 | } |
3456 | 3456 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 8fa41f3082cf..780b5adf8e7d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
@@ -79,7 +79,8 @@ enum { | |||
79 | (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ | 79 | (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ |
80 | (1ull << MLX4_EVENT_TYPE_CMD) | \ | 80 | (1ull << MLX4_EVENT_TYPE_CMD) | \ |
81 | (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ | 81 | (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ |
82 | (1ull << MLX4_EVENT_TYPE_FLR_EVENT)) | 82 | (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ |
83 | (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) | ||
83 | 84 | ||
84 | static void eq_set_ci(struct mlx4_eq *eq, int req_not) | 85 | static void eq_set_ci(struct mlx4_eq *eq, int req_not) |
85 | { | 86 | { |
@@ -443,6 +444,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) | |||
443 | queue_work(priv->mfunc.master.comm_wq, | 444 | queue_work(priv->mfunc.master.comm_wq, |
444 | &priv->mfunc.master.slave_flr_event_work); | 445 | &priv->mfunc.master.slave_flr_event_work); |
445 | break; | 446 | break; |
447 | |||
448 | case MLX4_EVENT_TYPE_FATAL_WARNING: | ||
449 | if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { | ||
450 | if (mlx4_is_master(dev)) | ||
451 | for (i = 0; i < dev->num_slaves; i++) { | ||
452 | mlx4_dbg(dev, "%s: Sending " | ||
453 | "MLX4_FATAL_WARNING_SUBTYPE_WARMING" | ||
454 | " to slave: %d\n", __func__, i); | ||
455 | if (i == dev->caps.function) | ||
456 | continue; | ||
457 | mlx4_slave_event(dev, i, eqe); | ||
458 | } | ||
459 | mlx4_err(dev, "Temperature Threshold was reached! " | ||
460 | "Threshold: %d celsius degrees; " | ||
461 | "Current Temperature: %d\n", | ||
462 | be16_to_cpu(eqe->event.warming.warning_threshold), | ||
463 | be16_to_cpu(eqe->event.warming.current_temperature)); | ||
464 | } else | ||
465 | mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), " | ||
466 | "subtype %02x on EQ %d at index %u. owner=%x, " | ||
467 | "nent=0x%x, slave=%x, ownership=%s\n", | ||
468 | eqe->type, eqe->subtype, eq->eqn, | ||
469 | eq->cons_index, eqe->owner, eq->nent, | ||
470 | eqe->slave_id, | ||
471 | !!(eqe->owner & 0x80) ^ | ||
472 | !!(eq->cons_index & eq->nent) ? "HW" : "SW"); | ||
473 | |||
474 | break; | ||
475 | |||
446 | case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: | 476 | case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: |
447 | case MLX4_EVENT_TYPE_ECC_DETECT: | 477 | case MLX4_EVENT_TYPE_ECC_DETECT: |
448 | default: | 478 | default: |
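The ownership expression in the new warning printout, `!!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent)`, is mlx4's toggling-owner-bit scheme: nent is a power of two, so the parity of `cons_index & nent` flips each time the consumer index wraps the ring, and an EQE belongs to software only when the two parities agree. Sketch:

    /* True when software may consume the entry; the XOR form in the
     * driver prints "HW" when the parities disagree. */
    static bool eqe_owned_by_sw(u8 owner, u32 cons_index, u32 nent)
    {
            return !!(owner & 0x80) == !!(cons_index & nent);
    }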
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 678558b502fc..a6ee22b319f8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -394,7 +394,7 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) | |||
394 | return ret; | 394 | return ret; |
395 | } | 395 | } |
396 | 396 | ||
397 | static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) | 397 | int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) |
398 | { | 398 | { |
399 | struct mlx4_priv *priv = mlx4_priv(dev); | 399 | struct mlx4_priv *priv = mlx4_priv(dev); |
400 | struct mlx4_slave_state *s_slave; | 400 | struct mlx4_slave_state *s_slave; |
@@ -647,6 +647,99 @@ out: | |||
647 | return err ? err : count; | 647 | return err ? err : count; |
648 | } | 648 | } |
649 | 649 | ||
650 | enum ibta_mtu { | ||
651 | IB_MTU_256 = 1, | ||
652 | IB_MTU_512 = 2, | ||
653 | IB_MTU_1024 = 3, | ||
654 | IB_MTU_2048 = 4, | ||
655 | IB_MTU_4096 = 5 | ||
656 | }; | ||
657 | |||
658 | static inline int int_to_ibta_mtu(int mtu) | ||
659 | { | ||
660 | switch (mtu) { | ||
661 | case 256: return IB_MTU_256; | ||
662 | case 512: return IB_MTU_512; | ||
663 | case 1024: return IB_MTU_1024; | ||
664 | case 2048: return IB_MTU_2048; | ||
665 | case 4096: return IB_MTU_4096; | ||
666 | default: return -1; | ||
667 | } | ||
668 | } | ||
669 | |||
670 | static inline int ibta_mtu_to_int(enum ibta_mtu mtu) | ||
671 | { | ||
672 | switch (mtu) { | ||
673 | case IB_MTU_256: return 256; | ||
674 | case IB_MTU_512: return 512; | ||
675 | case IB_MTU_1024: return 1024; | ||
676 | case IB_MTU_2048: return 2048; | ||
677 | case IB_MTU_4096: return 4096; | ||
678 | default: return -1; | ||
679 | } | ||
680 | } | ||
681 | |||
682 | static ssize_t show_port_ib_mtu(struct device *dev, | ||
683 | struct device_attribute *attr, | ||
684 | char *buf) | ||
685 | { | ||
686 | struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, | ||
687 | port_mtu_attr); | ||
688 | struct mlx4_dev *mdev = info->dev; | ||
689 | |||
690 | if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) | ||
691 | mlx4_warn(mdev, "port-level mtu is only used for IB ports\n"); | ||
692 | |||
693 | sprintf(buf, "%d\n", | ||
694 | ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); | ||
695 | return strlen(buf); | ||
696 | } | ||
697 | |||
698 | static ssize_t set_port_ib_mtu(struct device *dev, | ||
699 | struct device_attribute *attr, | ||
700 | const char *buf, size_t count) | ||
701 | { | ||
702 | struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, | ||
703 | port_mtu_attr); | ||
704 | struct mlx4_dev *mdev = info->dev; | ||
705 | struct mlx4_priv *priv = mlx4_priv(mdev); | ||
706 | int err, port, mtu, ibta_mtu = -1; | ||
707 | |||
708 | if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { | ||
709 | mlx4_warn(mdev, "port-level mtu is only used for IB ports\n"); | ||
710 | return -EINVAL; | ||
711 | } | ||
712 | |||
713 | err = sscanf(buf, "%d", &mtu); | ||
714 | if (err > 0) | ||
715 | ibta_mtu = int_to_ibta_mtu(mtu); | ||
716 | |||
717 | if (err <= 0 || ibta_mtu < 0) { | ||
718 | mlx4_err(mdev, "%s is not a valid IBTA mtu\n", buf); | ||
719 | return -EINVAL; | ||
720 | } | ||
721 | |||
722 | mdev->caps.port_ib_mtu[info->port] = ibta_mtu; | ||
723 | |||
724 | mlx4_stop_sense(mdev); | ||
725 | mutex_lock(&priv->port_mutex); | ||
726 | mlx4_unregister_device(mdev); | ||
727 | for (port = 1; port <= mdev->caps.num_ports; port++) { | ||
728 | mlx4_CLOSE_PORT(mdev, port); | ||
729 | err = mlx4_SET_PORT(mdev, port); | ||
730 | if (err) { | ||
731 | mlx4_err(mdev, "Failed to set port %d, " | ||
732 | "aborting\n", port); | ||
733 | goto err_set_port; | ||
734 | } | ||
735 | } | ||
736 | err = mlx4_register_device(mdev); | ||
737 | err_set_port: | ||
738 | mutex_unlock(&priv->port_mutex); | ||
739 | mlx4_start_sense(mdev); | ||
740 | return err ? err : count; | ||
741 | } | ||
742 | |||
650 | static int mlx4_load_fw(struct mlx4_dev *dev) | 743 | static int mlx4_load_fw(struct mlx4_dev *dev) |
651 | { | 744 | { |
652 | struct mlx4_priv *priv = mlx4_priv(dev); | 745 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -1131,6 +1224,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
1131 | goto err_stop_fw; | 1224 | goto err_stop_fw; |
1132 | } | 1225 | } |
1133 | 1226 | ||
1227 | dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; | ||
1228 | |||
1134 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars); | 1229 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars); |
1135 | init_hca.uar_page_sz = PAGE_SHIFT - 12; | 1230 | init_hca.uar_page_sz = PAGE_SHIFT - 12; |
1136 | 1231 | ||
@@ -1361,12 +1456,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
1361 | "with caps = 0\n", port, err); | 1456 | "with caps = 0\n", port, err); |
1362 | dev->caps.ib_port_def_cap[port] = ib_port_default_caps; | 1457 | dev->caps.ib_port_def_cap[port] = ib_port_default_caps; |
1363 | 1458 | ||
1364 | err = mlx4_check_ext_port_caps(dev, port); | 1459 | if (mlx4_is_mfunc(dev)) |
1365 | if (err) | 1460 | dev->caps.port_ib_mtu[port] = IB_MTU_2048; |
1366 | mlx4_warn(dev, "failed to get port %d extended " | 1461 | else |
1367 | "port capabilities support info (%d)." | 1462 | dev->caps.port_ib_mtu[port] = IB_MTU_4096; |
1368 | " Assuming not supported\n", | ||
1369 | port, err); | ||
1370 | 1463 | ||
1371 | err = mlx4_SET_PORT(dev, port); | 1464 | err = mlx4_SET_PORT(dev, port); |
1372 | if (err) { | 1465 | if (err) { |
@@ -1522,6 +1615,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | |||
1522 | info->port = -1; | 1615 | info->port = -1; |
1523 | } | 1616 | } |
1524 | 1617 | ||
1618 | sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); | ||
1619 | info->port_mtu_attr.attr.name = info->dev_mtu_name; | ||
1620 | if (mlx4_is_mfunc(dev)) | ||
1621 | info->port_mtu_attr.attr.mode = S_IRUGO; | ||
1622 | else { | ||
1623 | info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; | ||
1624 | info->port_mtu_attr.store = set_port_ib_mtu; | ||
1625 | } | ||
1626 | info->port_mtu_attr.show = show_port_ib_mtu; | ||
1627 | sysfs_attr_init(&info->port_mtu_attr.attr); | ||
1628 | |||
1629 | err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); | ||
1630 | if (err) { | ||
1631 | mlx4_err(dev, "Failed to create mtu file for port %d\n", port); | ||
1632 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); | ||
1633 | info->port = -1; | ||
1634 | } | ||
1635 | |||
1525 | return err; | 1636 | return err; |
1526 | } | 1637 | } |
1527 | 1638 | ||
@@ -1531,6 +1642,7 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) | |||
1531 | return; | 1642 | return; |
1532 | 1643 | ||
1533 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); | 1644 | device_remove_file(&info->dev->pdev->dev, &info->port_attr); |
1645 | device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); | ||
1534 | } | 1646 | } |
1535 | 1647 | ||
1536 | static int mlx4_init_steering(struct mlx4_dev *dev) | 1648 | static int mlx4_init_steering(struct mlx4_dev *dev) |
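The main.c additions hang a per-port `mlx4_port%d_mtu` attribute off the PCI device, read-only on multi-function devices and writable otherwise. The notable part is the dynamically named attribute: the name buffer lives in mlx4_port_info, and sysfs_attr_init() must run before registration because the attribute is not statically declared (lockdep needs a key). Condensed sketch of that pattern, with an illustrative helper name:

    static int create_port_mtu_attr(struct device *dev,
                                    struct mlx4_port_info *info,
                                    int port, bool writable)
    {
            snprintf(info->dev_mtu_name, sizeof(info->dev_mtu_name),
                     "mlx4_port%d_mtu", port);
            sysfs_attr_init(&info->port_mtu_attr.attr);
            info->port_mtu_attr.attr.name = info->dev_mtu_name;
            info->port_mtu_attr.attr.mode = writable ? (S_IRUGO | S_IWUSR)
                                                     : S_IRUGO;
            info->port_mtu_attr.show = show_port_ib_mtu;
            info->port_mtu_attr.store = writable ? set_port_ib_mtu : NULL;
            return device_create_file(dev, &info->port_mtu_attr);
    }

From userspace the knob then appears in the device's sysfs directory; reading it reports the current IBTA MTU, and writing one of 256/512/1024/2048/4096 triggers the unregister/SET_PORT/re-register cycle shown in set_port_ib_mtu().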
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index c92269f8c057..a99a13e9e695 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
@@ -363,6 +363,10 @@ struct mlx4_eqe { | |||
363 | struct { | 363 | struct { |
364 | __be32 slave_id; | 364 | __be32 slave_id; |
365 | } __packed flr_event; | 365 | } __packed flr_event; |
366 | struct { | ||
367 | __be16 current_temperature; | ||
368 | __be16 warning_threshold; | ||
369 | } __packed warming; | ||
366 | } event; | 370 | } event; |
367 | u8 slave_id; | 371 | u8 slave_id; |
368 | u8 reserved3[2]; | 372 | u8 reserved3[2]; |
@@ -399,7 +403,7 @@ struct mlx4_profile { | |||
399 | int num_cq; | 403 | int num_cq; |
400 | int num_mcg; | 404 | int num_mcg; |
401 | int num_mpt; | 405 | int num_mpt; |
402 | int num_mtt; | 406 | unsigned num_mtt; |
403 | }; | 407 | }; |
404 | 408 | ||
405 | struct mlx4_fw { | 409 | struct mlx4_fw { |
@@ -682,6 +686,8 @@ struct mlx4_port_info { | |||
682 | char dev_name[16]; | 686 | char dev_name[16]; |
683 | struct device_attribute port_attr; | 687 | struct device_attribute port_attr; |
684 | enum mlx4_port_type tmp_type; | 688 | enum mlx4_port_type tmp_type; |
689 | char dev_mtu_name[16]; | ||
690 | struct device_attribute port_mtu_attr; | ||
685 | struct mlx4_mac_table mac_table; | 691 | struct mlx4_mac_table mac_table; |
686 | struct radix_tree_root mac_tree; | 692 | struct radix_tree_root mac_tree; |
687 | struct mlx4_vlan_table vlan_table; | 693 | struct mlx4_vlan_table vlan_table; |
@@ -1025,7 +1031,6 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, | |||
1025 | struct mlx4_cmd_mailbox *outbox, | 1031 | struct mlx4_cmd_mailbox *outbox, |
1026 | struct mlx4_cmd_info *cmd); | 1032 | struct mlx4_cmd_info *cmd); |
1027 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); | 1033 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); |
1028 | int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port); | ||
1029 | 1034 | ||
1030 | 1035 | ||
1031 | int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, | 1036 | int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 25a80d71fb2a..5b7c06e0cd05 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
@@ -816,6 +816,9 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, | |||
816 | u64 mtt_offset; | 816 | u64 mtt_offset; |
817 | int err = -ENOMEM; | 817 | int err = -ENOMEM; |
818 | 818 | ||
819 | if (max_maps > dev->caps.max_fmr_maps) | ||
820 | return -EINVAL; | ||
821 | |||
819 | if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) | 822 | if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) |
820 | return -EINVAL; | 823 | return -EINVAL; |
821 | 824 | ||
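The new check pairs with the max_fmr_maps computation added in mlx4_init_hca(): each FMR remap appears to consume the 32-bit key bits left over above the MPT index, so the number of possible remaps shrinks as the MPT table grows. Worked example (assuming num_mpts is a power of two):

    /*
     * With num_mpts = 1 << 17:
     *   max_fmr_maps = (1 << (32 - ilog2(1 << 17))) - 1
     *                = (1 << 15) - 1
     *                = 32767
     * so mlx4_fmr_alloc() now rejects any max_maps request above 32767.
     */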
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index f44ae555bf43..409d444c4df5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
@@ -590,49 +590,6 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) | |||
590 | return err; | 590 | return err; |
591 | } | 591 | } |
592 | 592 | ||
593 | int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port) | ||
594 | { | ||
595 | struct mlx4_cmd_mailbox *inmailbox, *outmailbox; | ||
596 | u8 *inbuf, *outbuf; | ||
597 | int err, packet_error; | ||
598 | |||
599 | inmailbox = mlx4_alloc_cmd_mailbox(dev); | ||
600 | if (IS_ERR(inmailbox)) | ||
601 | return PTR_ERR(inmailbox); | ||
602 | |||
603 | outmailbox = mlx4_alloc_cmd_mailbox(dev); | ||
604 | if (IS_ERR(outmailbox)) { | ||
605 | mlx4_free_cmd_mailbox(dev, inmailbox); | ||
606 | return PTR_ERR(outmailbox); | ||
607 | } | ||
608 | |||
609 | inbuf = inmailbox->buf; | ||
610 | outbuf = outmailbox->buf; | ||
611 | memset(inbuf, 0, 256); | ||
612 | memset(outbuf, 0, 256); | ||
613 | inbuf[0] = 1; | ||
614 | inbuf[1] = 1; | ||
615 | inbuf[2] = 1; | ||
616 | inbuf[3] = 1; | ||
617 | |||
618 | *(__be16 *) (&inbuf[16]) = MLX4_ATTR_EXTENDED_PORT_INFO; | ||
619 | *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); | ||
620 | |||
621 | err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, | ||
622 | MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, | ||
623 | MLX4_CMD_NATIVE); | ||
624 | |||
625 | packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4)); | ||
626 | |||
627 | dev->caps.ext_port_cap[port] = (!err && !packet_error) ? | ||
628 | MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO | ||
629 | : 0; | ||
630 | |||
631 | mlx4_free_cmd_mailbox(dev, inmailbox); | ||
632 | mlx4_free_cmd_mailbox(dev, outmailbox); | ||
633 | return err; | ||
634 | } | ||
635 | |||
636 | static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, | 593 | static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, |
637 | u8 op_mod, struct mlx4_cmd_mailbox *inbox) | 594 | u8 op_mod, struct mlx4_cmd_mailbox *inbox) |
638 | { | 595 | { |
@@ -766,10 +723,18 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, | |||
766 | vhcr->op_modifier, inbox); | 723 | vhcr->op_modifier, inbox); |
767 | } | 724 | } |
768 | 725 | ||
726 | /* bit locations for set port command with zero op modifier */ | ||
727 | enum { | ||
728 | MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ | ||
729 | MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ | ||
730 | MLX4_CHANGE_PORT_VL_CAP = 21, | ||
731 | MLX4_CHANGE_PORT_MTU_CAP = 22, | ||
732 | }; | ||
733 | |||
769 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) | 734 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) |
770 | { | 735 | { |
771 | struct mlx4_cmd_mailbox *mailbox; | 736 | struct mlx4_cmd_mailbox *mailbox; |
772 | int err; | 737 | int err, vl_cap; |
773 | 738 | ||
774 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) | 739 | if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) |
775 | return 0; | 740 | return 0; |
@@ -781,8 +746,19 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) | |||
781 | memset(mailbox->buf, 0, 256); | 746 | memset(mailbox->buf, 0, 256); |
782 | 747 | ||
783 | ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; | 748 | ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; |
784 | err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, | 749 | |
785 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); | 750 | /* IB VL CAP enum isn't used by the firmware, just numerical values */ |
751 | for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { | ||
752 | ((__be32 *) mailbox->buf)[0] = cpu_to_be32( | ||
753 | (1 << MLX4_CHANGE_PORT_MTU_CAP) | | ||
754 | (1 << MLX4_CHANGE_PORT_VL_CAP) | | ||
755 | (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | | ||
756 | (vl_cap << MLX4_SET_PORT_VL_CAP)); | ||
757 | err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, | ||
758 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); | ||
759 | if (err != -ENOMEM) | ||
760 | break; | ||
761 | } | ||
786 | 762 | ||
787 | mlx4_free_cmd_mailbox(dev, mailbox); | 763 | mlx4_free_cmd_mailbox(dev, mailbox); |
788 | return err; | 764 | return err; |
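The SET_PORT rewrite turns a single command into a descending-capability retry: the firmware answers -ENOMEM when it cannot provision the requested VL count, so the loop halves the request (8, 4, 2, 1) until the command is accepted or fails with a different error. Stripped to the idiom (helper name illustrative):

    for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
            err = set_port_with_vl_cap(dev, port, vl_cap);
            if (err != -ENOMEM)
                    break; /* success, or an error retrying cannot fix */
    }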
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 1129677daa62..06e5adeb76f7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c | |||
@@ -83,12 +83,31 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, | |||
83 | u64 total_size = 0; | 83 | u64 total_size = 0; |
84 | struct mlx4_resource *profile; | 84 | struct mlx4_resource *profile; |
85 | struct mlx4_resource tmp; | 85 | struct mlx4_resource tmp; |
86 | struct sysinfo si; | ||
86 | int i, j; | 87 | int i, j; |
87 | 88 | ||
88 | profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL); | 89 | profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL); |
89 | if (!profile) | 90 | if (!profile) |
90 | return -ENOMEM; | 91 | return -ENOMEM; |
91 | 92 | ||
93 | /* | ||
94 | * We want to scale the number of MTTs with the size of the | ||
95 | * system memory, since it makes sense to register a lot of | ||
96 | * memory on a system with a lot of memory. As a heuristic, | ||
97 | * make sure we have enough MTTs to cover twice the system | ||
98 | * memory (with PAGE_SIZE entries). | ||
99 | * | ||
100 | * This number has to be a power of two and fit into 32 bits | ||
101 | * due to device limitations, so cap this at 2^31 as well. | ||
102 | * That limits us to 8TB of memory registration per HCA with | ||
103 | * 4KB pages, which is probably OK for the next few months. | ||
104 | */ | ||
105 | si_meminfo(&si); | ||
106 | request->num_mtt = | ||
107 | roundup_pow_of_two(max_t(unsigned, request->num_mtt, | ||
108 | min(1UL << 31, | ||
109 | si.totalram >> (log_mtts_per_seg - 1)))); | ||
110 | |||
92 | profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; | 111 | profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; |
93 | profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz; | 112 | profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz; |
94 | profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz; | 113 | profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz; |
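To see the profile heuristic in numbers: num_mtt here counts MTT segments of 2^log_mtts_per_seg entries each, and covering twice system memory needs 2 * totalram entries, i.e. totalram >> (log_mtts_per_seg - 1) segments. For example:

    /*
     * 16 GB of RAM with 4 KB pages: si.totalram ~= 1 << 22 pages.
     * With log_mtts_per_seg = 3 (8 entries per segment):
     *   num_mtt = (1 << 22) >> (3 - 1) = 1 << 20 segments
     *           = (1 << 20) * 8      = 1 << 23 MTT entries,
     * enough to cover 2 * 16 GB of registration; already a power of
     * two, so roundup_pow_of_two() leaves it unchanged.
     */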
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index aea61905499b..b8432516d68a 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -101,10 +101,6 @@ enum { | |||
101 | #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) | 101 | #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) |
102 | 102 | ||
103 | enum { | 103 | enum { |
104 | MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 | ||
105 | }; | ||
106 | |||
107 | enum { | ||
108 | MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, | 104 | MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, |
109 | MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, | 105 | MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, |
110 | MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, | 106 | MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, |
@@ -133,6 +129,7 @@ enum mlx4_event { | |||
133 | MLX4_EVENT_TYPE_CMD = 0x0a, | 129 | MLX4_EVENT_TYPE_CMD = 0x0a, |
134 | MLX4_EVENT_TYPE_VEP_UPDATE = 0x19, | 130 | MLX4_EVENT_TYPE_VEP_UPDATE = 0x19, |
135 | MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, | 131 | MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, |
132 | MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, | ||
136 | MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, | 133 | MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, |
137 | MLX4_EVENT_TYPE_NONE = 0xff, | 134 | MLX4_EVENT_TYPE_NONE = 0xff, |
138 | }; | 135 | }; |
@@ -143,6 +140,10 @@ enum { | |||
143 | }; | 140 | }; |
144 | 141 | ||
145 | enum { | 142 | enum { |
143 | MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0, | ||
144 | }; | ||
145 | |||
146 | enum { | ||
146 | MLX4_PERM_LOCAL_READ = 1 << 10, | 147 | MLX4_PERM_LOCAL_READ = 1 << 10, |
147 | MLX4_PERM_LOCAL_WRITE = 1 << 11, | 148 | MLX4_PERM_LOCAL_WRITE = 1 << 11, |
148 | MLX4_PERM_REMOTE_READ = 1 << 12, | 149 | MLX4_PERM_REMOTE_READ = 1 << 12, |
@@ -273,6 +274,7 @@ struct mlx4_caps { | |||
273 | int num_comp_vectors; | 274 | int num_comp_vectors; |
274 | int comp_pool; | 275 | int comp_pool; |
275 | int num_mpts; | 276 | int num_mpts; |
277 | int max_fmr_maps; | ||
276 | int num_mtts; | 278 | int num_mtts; |
277 | int fmr_reserved_mtts; | 279 | int fmr_reserved_mtts; |
278 | int reserved_mtts; | 280 | int reserved_mtts; |
@@ -308,7 +310,7 @@ struct mlx4_caps { | |||
308 | u32 port_mask[MLX4_MAX_PORTS + 1]; | 310 | u32 port_mask[MLX4_MAX_PORTS + 1]; |
309 | enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; | 311 | enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; |
310 | u32 max_counters; | 312 | u32 max_counters; |
311 | u8 ext_port_cap[MLX4_MAX_PORTS + 1]; | 313 | u8 port_ib_mtu[MLX4_MAX_PORTS + 1]; |
312 | }; | 314 | }; |
313 | 315 | ||
314 | struct mlx4_buf_list { | 316 | struct mlx4_buf_list { |
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index d3b9401b77b0..b513f57e1725 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h | |||
@@ -77,6 +77,15 @@ | |||
77 | 77 | ||
78 | #define IB_MGMT_MAX_METHODS 128 | 78 | #define IB_MGMT_MAX_METHODS 128 |
79 | 79 | ||
80 | /* MAD Status field bit masks */ | ||
81 | #define IB_MGMT_MAD_STATUS_SUCCESS 0x0000 | ||
82 | #define IB_MGMT_MAD_STATUS_BUSY 0x0001 | ||
83 | #define IB_MGMT_MAD_STATUS_REDIRECT_REQD 0x0002 | ||
84 | #define IB_MGMT_MAD_STATUS_BAD_VERSION 0x0004 | ||
85 | #define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD 0x0008 | ||
86 | #define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB 0x000c | ||
87 | #define IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE 0x001c | ||
88 | |||
80 | /* RMPP information */ | 89 | /* RMPP information */ |
81 | #define IB_MGMT_RMPP_VERSION 1 | 90 | #define IB_MGMT_RMPP_VERSION 1 |
82 | 91 | ||
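A decoding note on the new status masks: in the common MAD status field, bits 1:0 are independent flags (busy, redirect required) while bits 4:2 hold a 3-bit invalid-field code, so the larger values are codes rather than OR-able bits:

    /*
     *   1 << 2 = 0x0004  bad version
     *   2 << 2 = 0x0008  method not supported
     *   3 << 2 = 0x000c  method/attribute combination not supported
     *   7 << 2 = 0x001c  invalid attribute or attribute modifier value
     */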
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index bf5daafe8ecc..c3cca5a4dacd 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -239,6 +239,15 @@ static inline int ib_width_enum_to_int(enum ib_port_width width) | |||
239 | } | 239 | } |
240 | } | 240 | } |
241 | 241 | ||
242 | enum ib_port_speed { | ||
243 | IB_SPEED_SDR = 1, | ||
244 | IB_SPEED_DDR = 2, | ||
245 | IB_SPEED_QDR = 4, | ||
246 | IB_SPEED_FDR10 = 8, | ||
247 | IB_SPEED_FDR = 16, | ||
248 | IB_SPEED_EDR = 32 | ||
249 | }; | ||
250 | |||
242 | struct ib_protocol_stats { | 251 | struct ib_protocol_stats { |
243 | /* TBD... */ | 252 | /* TBD... */ |
244 | }; | 253 | }; |
@@ -509,6 +518,7 @@ enum ib_wc_flags { | |||
509 | IB_WC_GRH = 1, | 518 | IB_WC_GRH = 1, |
510 | IB_WC_WITH_IMM = (1<<1), | 519 | IB_WC_WITH_IMM = (1<<1), |
511 | IB_WC_WITH_INVALIDATE = (1<<2), | 520 | IB_WC_WITH_INVALIDATE = (1<<2), |
521 | IB_WC_IP_CSUM_OK = (1<<3), | ||
512 | }; | 522 | }; |
513 | 523 | ||
514 | struct ib_wc { | 524 | struct ib_wc { |
@@ -529,7 +539,6 @@ struct ib_wc { | |||
529 | u8 sl; | 539 | u8 sl; |
530 | u8 dlid_path_bits; | 540 | u8 dlid_path_bits; |
531 | u8 port_num; /* valid only for DR SMPs on switches */ | 541 | u8 port_num; /* valid only for DR SMPs on switches */ |
532 | int csum_ok; | ||
533 | }; | 542 | }; |
534 | 543 | ||
535 | enum ib_cq_notify_flags { | 544 | enum ib_cq_notify_flags { |
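Finally, the new ib_port_speed enum only assigns distinct bit values; the per-lane data rates usually associated with them are not part of the header. An illustrative mapping, assuming the customary IBTA nominal rates (in hundredths of Gb/s so integers suffice):

    static int ib_speed_to_rate_x100(enum ib_port_speed speed)
    {
            switch (speed) {
            case IB_SPEED_SDR:   return 250;  /*  2.5  Gb/s per lane */
            case IB_SPEED_DDR:   return 500;  /*  5.0  Gb/s */
            case IB_SPEED_QDR:   return 1000; /* 10.0  Gb/s */
            case IB_SPEED_FDR10: return 1031; /* ~10.31 Gb/s, 64/66 encoded */
            case IB_SPEED_FDR:   return 1406; /* ~14.06 Gb/s */
            case IB_SPEED_EDR:   return 2578; /* ~25.78 Gb/s */
            }
            return -1;
    }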