author     David Howells <dhowells@redhat.com>                    2006-12-05 09:37:56 -0500
committer  David Howells <dhowells@warthog.cambridge.redhat.com>  2006-12-05 09:37:56 -0500
commit     4c1ac1b49122b805adfa4efc620592f68dccf5db (patch)
tree       87557f4bc2fd4fe65b7570489c2f610c45c0adcd /drivers/infiniband/core/cma.c
parent     c4028958b6ecad064b1a6303a6a5906d4fe48d73 (diff)
parent     d916faace3efc0bf19fe9a615a1ab8fa1a24cd93 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:

	drivers/infiniband/core/iwcm.c
	drivers/net/chelsio/cxgb2.c
	drivers/net/wireless/bcm43xx/bcm43xx_main.c
	drivers/net/wireless/prism54/islpci_eth.c
	drivers/usb/core/hub.h
	drivers/usb/input/hid-core.c
	net/core/netpoll.c

Fix up merge failures with Linus's head and fix new compilation failures.

Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'drivers/infiniband/core/cma.c')
 -rw-r--r--  drivers/infiniband/core/cma.c | 49
 1 file changed, 21 insertions(+), 28 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 189f73f3f72..985a6b564d8 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -344,7 +344,7 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
 		return ret;
 
 	qp_attr.qp_state = IB_QPS_INIT;
-	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+	qp_attr.qp_access_flags = 0;
 	qp_attr.port_num = id_priv->id.port_num;
 	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 			    IB_QP_PKEY_INDEX | IB_QP_PORT);
@@ -935,13 +935,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
-	if (ret) {
-		ret = -ENODEV;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-		goto out;
-	}
+	if (ret)
+		goto release_conn_id;
 
 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -951,13 +946,17 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
 			      ib_event->private_data + offset,
 			      IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
-	if (ret) {
-		/* Destroy the CM ID by returning a non-zero value. */
-		conn_id->cm_id.ib = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-	}
+	if (!ret)
+		goto out;
+
+	/* Destroy the CM ID by returning a non-zero value. */
+	conn_id->cm_id.ib = NULL;
+
+release_conn_id:
+	cma_exch(conn_id, CMA_DESTROYING);
+	cma_release_remove(conn_id);
+	rdma_destroy_id(&conn_id->id);
+
 out:
 	cma_release_remove(listen_id);
 	return ret;
@@ -1481,19 +1480,18 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 	u8 p;
 
 	mutex_lock(&lock);
+	if (list_empty(&dev_list)) {
+		ret = -ENODEV;
+		goto out;
+	}
 	list_for_each_entry(cma_dev, &dev_list, list)
 		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
-			if (!ib_query_port (cma_dev->device, p, &port_attr) &&
+			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
 			    port_attr.state == IB_PORT_ACTIVE)
 				goto port_found;
 
-	if (!list_empty(&dev_list)) {
-		p = 1;
-		cma_dev = list_entry(dev_list.next, struct cma_device, list);
-	} else {
-		ret = -ENODEV;
-		goto out;
-	}
+	p = 1;
+	cma_dev = list_entry(dev_list.next, struct cma_device, list);
 
 port_found:
 	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@ -2123,8 +2121,6 @@ static void cma_add_one(struct ib_device *device)
 
 	cma_dev->device = device;
 	cma_dev->node_guid = device->node_guid;
-	if (!cma_dev->node_guid)
-		goto err;
 
 	init_completion(&cma_dev->comp);
 	atomic_set(&cma_dev->refcount, 1);
@@ -2136,9 +2132,6 @@ static void cma_add_one(struct ib_device *device)
 	list_for_each_entry(id_priv, &listen_any_list, list)
 		cma_listen_on_dev(id_priv, cma_dev);
 	mutex_unlock(&lock);
-	return;
-err:
-	kfree(cma_dev);
 }
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
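The most substantive change in this diff is the error-path consolidation in cma_req_handler(): the duplicated cma_exch()/cma_release_remove()/rdma_destroy_id() teardown is folded into a single release_conn_id label that both failure points reach, and the overwrite of the error code with -ENODEV is dropped so the original return value propagates. The snippet below is a minimal, self-contained sketch of that goto-based cleanup pattern only; setup_a(), setup_b() and teardown() are hypothetical stand-ins for cma_acquire_dev(), cma_notify_user() and the shared teardown, and none of this code is part of the commit itself.

/*
 * Standalone sketch (not kernel code) of the consolidated error path:
 * one cleanup label, reached from every failure point, skipped on success.
 */
#include <stdio.h>

static int setup_a(void) { return 0; }	/* pretend this step succeeds */
static int setup_b(void) { return -1; }	/* pretend this step fails */
static void teardown(void) { puts("shared cleanup"); }

static int handler(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto release;	/* early failure jumps straight to cleanup */

	ret = setup_b();
	if (!ret)
		goto out;	/* success skips the cleanup entirely */

	/* late failure falls through to the same cleanup */
release:
	teardown();
out:
	return ret;
}

int main(void)
{
	return handler() ? 1 : 0;
}

The point of the restructuring is that the cleanup sequence exists in exactly one place, the success path bypasses it via goto out, and whatever error code the failing step returned is what the caller sees.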