diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-21 13:33:42 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-21 13:33:42 -0400 |
commit | 0c2fe82a9b106f1c03719783134360586d718a69 (patch) | |
tree | ec1a18ca49f1e6724ef3a93727f5f73b7df61931 /drivers/infiniband/core | |
parent | 5f0e685f316a1de6d3af8b23eaf46651faca32ab (diff) | |
parent | f0e88aeb19dac00ed2e09fd4d39ee65f32d5e968 (diff) |
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull InfiniBand/RDMA changes for the 3.4 merge window from Roland Dreier:
"Nothing big really stands out; by patch count lots of fixes to the
mlx4 driver plus some cleanups and fixes to the core and other
drivers."
* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (28 commits)
mlx4_core: Scale size of MTT table with system RAM
mlx4_core: Allow dynamic MTU configuration for IB ports
IB/mlx4: Fix info returned when querying IBoE ports
IB/mlx4: Fix possible missed completion event
mlx4_core: Report thermal error events
mlx4_core: Fix one more static exported function
IB: Change CQE "csum_ok" field to a bit flag
RDMA/iwcm: Reject connect requests if cmid is not in LISTEN state
RDMA/cxgb3: Don't pass irq flags to flush_qp()
mlx4_core: Get rid of redundant ext_port_cap flags
RDMA/ucma: Fix AB-BA deadlock
IB/ehca: Fix ilog2() compile failure
IB: Use central enum for speed instead of hard-coded values
IB/iser: Post initial receive buffers before sending the final login request
IB/iser: Free IB connection resources in the proper place
IB/srp: Consolidate repetitive sysfs code
IB/srp: Use pr_fmt() and pr_err()/pr_warn()
IB/core: Fix SDR rates in sysfs
mlx4: Enforce device max FMR maps in FMR alloc
IB/mlx4: Set bad_wr for invalid send opcode
...
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r-- | drivers/infiniband/core/iwcm.c | 24 | ||||
-rw-r--r-- | drivers/infiniband/core/mad.c | 21 | ||||
-rw-r--r-- | drivers/infiniband/core/sysfs.c | 27 | ||||
-rw-r--r-- | drivers/infiniband/core/ucma.c | 37 |
4 files changed, 67 insertions(+), 42 deletions(-)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 1a696f76b616..0bb99bb38809 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
@@ -624,17 +624,6 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv, | |||
624 | */ | 624 | */ |
625 | BUG_ON(iw_event->status); | 625 | BUG_ON(iw_event->status); |
626 | 626 | ||
627 | /* | ||
628 | * We could be destroying the listening id. If so, ignore this | ||
629 | * upcall. | ||
630 | */ | ||
631 | spin_lock_irqsave(&listen_id_priv->lock, flags); | ||
632 | if (listen_id_priv->state != IW_CM_STATE_LISTEN) { | ||
633 | spin_unlock_irqrestore(&listen_id_priv->lock, flags); | ||
634 | goto out; | ||
635 | } | ||
636 | spin_unlock_irqrestore(&listen_id_priv->lock, flags); | ||
637 | |||
638 | cm_id = iw_create_cm_id(listen_id_priv->id.device, | 627 | cm_id = iw_create_cm_id(listen_id_priv->id.device, |
639 | listen_id_priv->id.cm_handler, | 628 | listen_id_priv->id.cm_handler, |
640 | listen_id_priv->id.context); | 629 | listen_id_priv->id.context); |
@@ -649,6 +638,19 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv, | |||
649 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | 638 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); |
650 | cm_id_priv->state = IW_CM_STATE_CONN_RECV; | 639 | cm_id_priv->state = IW_CM_STATE_CONN_RECV; |
651 | 640 | ||
641 | /* | ||
642 | * We could be destroying the listening id. If so, ignore this | ||
643 | * upcall. | ||
644 | */ | ||
645 | spin_lock_irqsave(&listen_id_priv->lock, flags); | ||
646 | if (listen_id_priv->state != IW_CM_STATE_LISTEN) { | ||
647 | spin_unlock_irqrestore(&listen_id_priv->lock, flags); | ||
648 | iw_cm_reject(cm_id, NULL, 0); | ||
649 | iw_destroy_cm_id(cm_id); | ||
650 | goto out; | ||
651 | } | ||
652 | spin_unlock_irqrestore(&listen_id_priv->lock, flags); | ||
653 | |||
652 | ret = alloc_work_entries(cm_id_priv, 3); | 654 | ret = alloc_work_entries(cm_id_priv, 3); |
653 | if (ret) { | 655 | if (ret) { |
654 | iw_cm_reject(cm_id, NULL, 0); | 656 | iw_cm_reject(cm_id, NULL, 0); |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 2fe428bba54c..426bb7617ec6 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -1842,6 +1842,24 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1842 | } | 1842 | } |
1843 | } | 1843 | } |
1844 | 1844 | ||
1845 | static bool generate_unmatched_resp(struct ib_mad_private *recv, | ||
1846 | struct ib_mad_private *response) | ||
1847 | { | ||
1848 | if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET || | ||
1849 | recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) { | ||
1850 | memcpy(response, recv, sizeof *response); | ||
1851 | response->header.recv_wc.wc = &response->header.wc; | ||
1852 | response->header.recv_wc.recv_buf.mad = &response->mad.mad; | ||
1853 | response->header.recv_wc.recv_buf.grh = &response->grh; | ||
1854 | response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; | ||
1855 | response->mad.mad.mad_hdr.status = | ||
1856 | cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); | ||
1857 | |||
1858 | return true; | ||
1859 | } else { | ||
1860 | return false; | ||
1861 | } | ||
1862 | } | ||
1845 | static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, | 1863 | static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, |
1846 | struct ib_wc *wc) | 1864 | struct ib_wc *wc) |
1847 | { | 1865 | { |
@@ -1963,6 +1981,9 @@ local: | |||
1963 | * or via recv_handler in ib_mad_complete_recv() | 1981 | * or via recv_handler in ib_mad_complete_recv() |
1964 | */ | 1982 | */ |
1965 | recv = NULL; | 1983 | recv = NULL; |
1984 | } else if (generate_unmatched_resp(recv, response)) { | ||
1985 | agent_send_response(&response->mad.mad, &recv->grh, wc, | ||
1986 | port_priv->device, port_num, qp_info->qp->qp_num); | ||
1966 | } | 1987 | } |
1967 | 1988 | ||
1968 | out: | 1989 | out: |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index c61bca30fd2d..83b720ef6c34 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
@@ -179,33 +179,36 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, | |||
179 | { | 179 | { |
180 | struct ib_port_attr attr; | 180 | struct ib_port_attr attr; |
181 | char *speed = ""; | 181 | char *speed = ""; |
182 | int rate; | 182 | int rate = -1; /* in deci-Gb/sec */ |
183 | ssize_t ret; | 183 | ssize_t ret; |
184 | 184 | ||
185 | ret = ib_query_port(p->ibdev, p->port_num, &attr); | 185 | ret = ib_query_port(p->ibdev, p->port_num, &attr); |
186 | if (ret) | 186 | if (ret) |
187 | return ret; | 187 | return ret; |
188 | 188 | ||
189 | rate = (25 * attr.active_speed) / 10; | ||
190 | |||
191 | switch (attr.active_speed) { | 189 | switch (attr.active_speed) { |
192 | case 2: | 190 | case IB_SPEED_SDR: |
191 | rate = 25; | ||
192 | break; | ||
193 | case IB_SPEED_DDR: | ||
193 | speed = " DDR"; | 194 | speed = " DDR"; |
195 | rate = 50; | ||
194 | break; | 196 | break; |
195 | case 4: | 197 | case IB_SPEED_QDR: |
196 | speed = " QDR"; | 198 | speed = " QDR"; |
199 | rate = 100; | ||
197 | break; | 200 | break; |
198 | case 8: | 201 | case IB_SPEED_FDR10: |
199 | speed = " FDR10"; | 202 | speed = " FDR10"; |
200 | rate = 10; | 203 | rate = 100; |
201 | break; | 204 | break; |
202 | case 16: | 205 | case IB_SPEED_FDR: |
203 | speed = " FDR"; | 206 | speed = " FDR"; |
204 | rate = 14; | 207 | rate = 140; |
205 | break; | 208 | break; |
206 | case 32: | 209 | case IB_SPEED_EDR: |
207 | speed = " EDR"; | 210 | speed = " EDR"; |
208 | rate = 25; | 211 | rate = 250; |
209 | break; | 212 | break; |
210 | } | 213 | } |
211 | 214 | ||
@@ -214,7 +217,7 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, | |||
214 | return -EINVAL; | 217 | return -EINVAL; |
215 | 218 | ||
216 | return sprintf(buf, "%d%s Gb/sec (%dX%s)\n", | 219 | return sprintf(buf, "%d%s Gb/sec (%dX%s)\n", |
217 | rate, (attr.active_speed == 1) ? ".5" : "", | 220 | rate / 10, rate % 10 ? ".5" : "", |
218 | ib_width_enum_to_int(attr.active_width), speed); | 221 | ib_width_enum_to_int(attr.active_width), speed); |
219 | } | 222 | } |
220 | 223 | ||
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5034a87cc72d..5861cdb22b7c 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -449,24 +449,6 @@ static void ucma_cleanup_multicast(struct ucma_context *ctx) | |||
449 | mutex_unlock(&mut); | 449 | mutex_unlock(&mut); |
450 | } | 450 | } |
451 | 451 | ||
452 | static void ucma_cleanup_events(struct ucma_context *ctx) | ||
453 | { | ||
454 | struct ucma_event *uevent, *tmp; | ||
455 | |||
456 | list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { | ||
457 | if (uevent->ctx != ctx) | ||
458 | continue; | ||
459 | |||
460 | list_del(&uevent->list); | ||
461 | |||
462 | /* clear incoming connections. */ | ||
463 | if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) | ||
464 | rdma_destroy_id(uevent->cm_id); | ||
465 | |||
466 | kfree(uevent); | ||
467 | } | ||
468 | } | ||
469 | |||
470 | static void ucma_cleanup_mc_events(struct ucma_multicast *mc) | 452 | static void ucma_cleanup_mc_events(struct ucma_multicast *mc) |
471 | { | 453 | { |
472 | struct ucma_event *uevent, *tmp; | 454 | struct ucma_event *uevent, *tmp; |
@@ -480,9 +462,16 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc) | |||
480 | } | 462 | } |
481 | } | 463 | } |
482 | 464 | ||
465 | /* | ||
466 | * We cannot hold file->mut when calling rdma_destroy_id() or we can | ||
467 | * deadlock. We also acquire file->mut in ucma_event_handler(), and | ||
468 | * rdma_destroy_id() will wait until all callbacks have completed. | ||
469 | */ | ||
483 | static int ucma_free_ctx(struct ucma_context *ctx) | 470 | static int ucma_free_ctx(struct ucma_context *ctx) |
484 | { | 471 | { |
485 | int events_reported; | 472 | int events_reported; |
473 | struct ucma_event *uevent, *tmp; | ||
474 | LIST_HEAD(list); | ||
486 | 475 | ||
487 | /* No new events will be generated after destroying the id. */ | 476 | /* No new events will be generated after destroying the id. */ |
488 | rdma_destroy_id(ctx->cm_id); | 477 | rdma_destroy_id(ctx->cm_id); |
@@ -491,10 +480,20 @@ static int ucma_free_ctx(struct ucma_context *ctx) | |||
491 | 480 | ||
492 | /* Cleanup events not yet reported to the user. */ | 481 | /* Cleanup events not yet reported to the user. */ |
493 | mutex_lock(&ctx->file->mut); | 482 | mutex_lock(&ctx->file->mut); |
494 | ucma_cleanup_events(ctx); | 483 | list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { |
484 | if (uevent->ctx == ctx) | ||
485 | list_move_tail(&uevent->list, &list); | ||
486 | } | ||
495 | list_del(&ctx->list); | 487 | list_del(&ctx->list); |
496 | mutex_unlock(&ctx->file->mut); | 488 | mutex_unlock(&ctx->file->mut); |
497 | 489 | ||
490 | list_for_each_entry_safe(uevent, tmp, &list, list) { | ||
491 | list_del(&uevent->list); | ||
492 | if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) | ||
493 | rdma_destroy_id(uevent->cm_id); | ||
494 | kfree(uevent); | ||
495 | } | ||
496 | |||
498 | events_reported = ctx->events_reported; | 497 | events_reported = ctx->events_reported; |
499 | kfree(ctx); | 498 | kfree(ctx); |
500 | return events_reported; | 499 | return events_reported; |