Diffstat (limited to 'net/rds/ib_cm.c')
-rw-r--r--	net/rds/ib_cm.c	184
1 file changed, 126 insertions, 58 deletions
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index f68832798db2..ee369d201a65 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -38,6 +38,36 @@
 #include "rds.h"
 #include "ib.h"
 
+static char *rds_ib_event_type_strings[] = {
+#define RDS_IB_EVENT_STRING(foo) \
+		[IB_EVENT_##foo] = __stringify(IB_EVENT_##foo)
+	RDS_IB_EVENT_STRING(CQ_ERR),
+	RDS_IB_EVENT_STRING(QP_FATAL),
+	RDS_IB_EVENT_STRING(QP_REQ_ERR),
+	RDS_IB_EVENT_STRING(QP_ACCESS_ERR),
+	RDS_IB_EVENT_STRING(COMM_EST),
+	RDS_IB_EVENT_STRING(SQ_DRAINED),
+	RDS_IB_EVENT_STRING(PATH_MIG),
+	RDS_IB_EVENT_STRING(PATH_MIG_ERR),
+	RDS_IB_EVENT_STRING(DEVICE_FATAL),
+	RDS_IB_EVENT_STRING(PORT_ACTIVE),
+	RDS_IB_EVENT_STRING(PORT_ERR),
+	RDS_IB_EVENT_STRING(LID_CHANGE),
+	RDS_IB_EVENT_STRING(PKEY_CHANGE),
+	RDS_IB_EVENT_STRING(SM_CHANGE),
+	RDS_IB_EVENT_STRING(SRQ_ERR),
+	RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED),
+	RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED),
+	RDS_IB_EVENT_STRING(CLIENT_REREGISTER),
+#undef RDS_IB_EVENT_STRING
+};
+
+static char *rds_ib_event_str(enum ib_event_type type)
+{
+	return rds_str_array(rds_ib_event_type_strings,
+			     ARRAY_SIZE(rds_ib_event_type_strings), type);
+};
+
 /*
  * Set the selected protocol version
  */
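The block above maps each IB event enum value to its own name with designated initializers, so the lookup is a bounds-checked array index rather than a switch. rds_str_array() is defined elsewhere in RDS, so its body below is an assumption; this self-contained user-space sketch shows the same stringify-table pattern (the demo enum and names are illustrative):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
    #define __stringify(x) #x	/* the kernel macro expands in two steps */

    enum demo_event { EV_CQ_ERR, EV_QP_FATAL, EV_PORT_ACTIVE };

    static char *demo_event_strings[] = {
    #define DEMO_EVENT_STRING(foo) [EV_##foo] = __stringify(EV_##foo)
            DEMO_EVENT_STRING(CQ_ERR),
            DEMO_EVENT_STRING(QP_FATAL),
            DEMO_EVENT_STRING(PORT_ACTIVE),
    #undef DEMO_EVENT_STRING
    };

    /* Assumed shape of rds_str_array(): bounds-check, then index. */
    static char *demo_str_array(char **array, size_t elements, size_t index)
    {
            if (index < elements && array[index])
                    return array[index];
            return "unknown";
    }

    int main(void)
    {
            /* Prints "EV_QP_FATAL", then "unknown" for an out-of-range value. */
            printf("%s\n", demo_str_array(demo_event_strings,
                           ARRAY_SIZE(demo_event_strings), EV_QP_FATAL));
            printf("%s\n", demo_str_array(demo_event_strings,
                           ARRAY_SIZE(demo_event_strings), 42));
            return 0;
    }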
@@ -95,7 +125,6 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 {
 	const struct rds_ib_connect_private *dp = NULL;
 	struct rds_ib_connection *ic = conn->c_transport_data;
-	struct rds_ib_device *rds_ibdev;
 	struct ib_qp_attr qp_attr;
 	int err;
 
@@ -111,11 +140,21 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 		}
 	}
 
-	printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
-			&conn->c_faddr,
-			RDS_PROTOCOL_MAJOR(conn->c_version),
-			RDS_PROTOCOL_MINOR(conn->c_version),
-			ic->i_flowctl ? ", flow control" : "");
+	if (conn->c_version < RDS_PROTOCOL(3,1)) {
+		printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
+		       " no longer supported\n",
+		       &conn->c_faddr,
+		       RDS_PROTOCOL_MAJOR(conn->c_version),
+		       RDS_PROTOCOL_MINOR(conn->c_version));
+		rds_conn_destroy(conn);
+		return;
+	} else {
+		printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
+		       &conn->c_faddr,
+		       RDS_PROTOCOL_MAJOR(conn->c_version),
+		       RDS_PROTOCOL_MINOR(conn->c_version),
+		       ic->i_flowctl ? ", flow control" : "");
+	}
 
 	/*
 	 * Init rings and fill recv. this needs to wait until protocol negotiation
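The new rejection of pre-3.1 peers works because RDS packs the version as (major << 8) | minor, which makes plain integer comparison order versions correctly. A compile-time check of that property, assuming the macros match their definitions in net/rds/rds.h:

    /* Assumed to match the version macros in net/rds/rds.h. */
    #define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
    #define RDS_PROTOCOL_MINOR(v)	((v) & 255)
    #define RDS_PROTOCOL(maj, min)	(((maj) << 8) | min)

    /* RDS_PROTOCOL(3, 1) == 0x0301, so a 3.0 peer (0x0300) takes the
     * "no longer supported" branch above. C11 asserts, no runtime cost. */
    _Static_assert(RDS_PROTOCOL(3, 0) < RDS_PROTOCOL(3, 1), "minor orders versions");
    _Static_assert(RDS_PROTOCOL(2, 9) < RDS_PROTOCOL(3, 0), "major dominates minor");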
@@ -125,7 +164,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 	rds_ib_recv_init_ring(ic);
 	/* Post receive buffers - as a side effect, this will update
 	 * the posted credit count. */
-	rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
+	rds_ib_recv_refill(conn, 1);
 
 	/* Tune RNR behavior */
 	rds_ib_tune_rnr(ic, &qp_attr);
@@ -135,12 +174,11 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 	if (err)
 		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);
 
-	/* update ib_device with this local ipaddr & conn */
-	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
-	err = rds_ib_update_ipaddr(rds_ibdev, conn->c_laddr);
+	/* update ib_device with this local ipaddr */
+	err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
 	if (err)
-		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err);
-	rds_ib_add_conn(rds_ibdev, conn);
+		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
+			err);
 
 	/* If the peer gave us the last packet it saw, process this as if
 	 * we had received a regular ACK. */
@@ -153,18 +191,23 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
 			struct rdma_conn_param *conn_param,
 			struct rds_ib_connect_private *dp,
-			u32 protocol_version)
+			u32 protocol_version,
+			u32 max_responder_resources,
+			u32 max_initiator_depth)
 {
+	struct rds_ib_connection *ic = conn->c_transport_data;
+	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
+
 	memset(conn_param, 0, sizeof(struct rdma_conn_param));
-	/* XXX tune these? */
-	conn_param->responder_resources = 1;
-	conn_param->initiator_depth = 1;
+
+	conn_param->responder_resources =
+		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
+	conn_param->initiator_depth =
+		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
 	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
 	conn_param->rnr_retry_count = 7;
 
 	if (dp) {
-		struct rds_ib_connection *ic = conn->c_transport_data;
-
 		memset(dp, 0, sizeof(*dp));
 		dp->dp_saddr = conn->c_laddr;
 		dp->dp_daddr = conn->c_faddr;
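With this change the RDMA read depths are negotiated instead of hard-coded to 1: each parameter becomes the minimum of what the local HCA supports and what the peer asked for. min_t is the kernel's type-safe min; a user-space sketch of the clamp (the values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Rough analogue of kernel min_t(u32, a, b); beware double evaluation. */
    #define min_u32(a, b) ((uint32_t)(a) < (uint32_t)(b) ? (uint32_t)(a) : (uint32_t)(b))

    int main(void)
    {
            uint32_t hca_limit = 16;		/* rds_ibdev->max_responder_resources */
            uint32_t peer_req = UINT32_MAX;	/* active side passes UINT_MAX below */

            /* The passive side feeds in the values from the incoming REQ; the
             * active side passes UINT_MAX, so the device limit always wins. */
            printf("responder_resources = %u\n", min_u32(hca_limit, peer_req));
            return 0;
    }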
@@ -189,7 +232,8 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
 
 static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
 {
-	rdsdebug("event %u data %p\n", event->event, data);
+	rdsdebug("event %u (%s) data %p\n",
+		 event->event, rds_ib_event_str(event->event), data);
 }
 
 static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
@@ -197,16 +241,18 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
 	struct rds_connection *conn = data;
 	struct rds_ib_connection *ic = conn->c_transport_data;
 
-	rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event);
+	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
+		 rds_ib_event_str(event->event));
 
 	switch (event->event) {
 	case IB_EVENT_COMM_EST:
 		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
 		break;
 	default:
-		rdsdebug("Fatal QP Event %u "
+		rdsdebug("Fatal QP Event %u (%s) "
 			"- connection %pI4->%pI4, reconnecting\n",
-			event->event, &conn->c_laddr, &conn->c_faddr);
+			event->event, rds_ib_event_str(event->event),
+			&conn->c_laddr, &conn->c_faddr);
 		rds_conn_drop(conn);
 		break;
 	}
@@ -224,18 +270,16 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	struct rds_ib_device *rds_ibdev;
 	int ret;
 
-	/* rds_ib_add_one creates a rds_ib_device object per IB device,
-	 * and allocates a protection domain, memory range and FMR pool
-	 * for each.  If that fails for any reason, it will not register
-	 * the rds_ibdev at all.
+	/*
+	 * It's normal to see a null device if an incoming connection races
+	 * with device removal, so we don't print a warning.
 	 */
-	rds_ibdev = ib_get_client_data(dev, &rds_ib_client);
-	if (rds_ibdev == NULL) {
-		if (printk_ratelimit())
-			printk(KERN_NOTICE "RDS/IB: No client_data for device %s\n",
-					dev->name);
+	rds_ibdev = rds_ib_get_client_data(dev);
+	if (!rds_ibdev)
 		return -EOPNOTSUPP;
-	}
+
+	/* add the conn now so that connection establishment has the dev */
+	rds_ib_add_conn(rds_ibdev, conn);
 
 	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
 		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
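rds_ib_get_client_data() replaces the raw ib_get_client_data() call; together with the rds_ib_dev_put() added to the out: label a few hunks below, it follows a lookup-takes-a-reference discipline, so the device cannot be freed while rds_ib_setup_qp() is still using it. A user-space analogue of that get/put shape (the helper bodies are assumptions, not the RDS implementations):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct dev {
            atomic_int refcount;
    };

    /* Lookup returns the object with a reference held... */
    static struct dev *dev_get(struct dev *d)
    {
            if (d)
                    atomic_fetch_add(&d->refcount, 1);
            return d;	/* may be NULL if lookup raced with removal */
    }

    /* ...and every exit path must drop it; last reference frees. */
    static void dev_put(struct dev *d)
    {
            if (atomic_fetch_sub(&d->refcount, 1) == 1)
                    free(d);
    }

    static int setup(struct dev *registered)
    {
            struct dev *d = dev_get(registered);

            if (!d)
                    return -1;	/* like -EOPNOTSUPP above: quiet failure */
            /* ... setup work that may fail and fall through ... */
            dev_put(d);		/* single release on the shared exit path */
            return 0;
    }

    int main(void)
    {
            struct dev *d = calloc(1, sizeof(*d));
            atomic_init(&d->refcount, 1);	/* registration's reference */
            setup(d);
            dev_put(d);			/* unregister: drops the last ref */
            return 0;
    }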
@@ -306,7 +350,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 					ic->i_send_ring.w_nr *
 						sizeof(struct rds_header),
 					&ic->i_send_hdrs_dma, GFP_KERNEL);
-	if (ic->i_send_hdrs == NULL) {
+	if (!ic->i_send_hdrs) {
 		ret = -ENOMEM;
 		rdsdebug("ib_dma_alloc_coherent send failed\n");
 		goto out;
@@ -316,7 +360,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 					ic->i_recv_ring.w_nr *
 						sizeof(struct rds_header),
 					&ic->i_recv_hdrs_dma, GFP_KERNEL);
-	if (ic->i_recv_hdrs == NULL) {
+	if (!ic->i_recv_hdrs) {
 		ret = -ENOMEM;
 		rdsdebug("ib_dma_alloc_coherent recv failed\n");
 		goto out;
@@ -324,22 +368,24 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 
 	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
 				       &ic->i_ack_dma, GFP_KERNEL);
-	if (ic->i_ack == NULL) {
+	if (!ic->i_ack) {
 		ret = -ENOMEM;
 		rdsdebug("ib_dma_alloc_coherent ack failed\n");
 		goto out;
 	}
 
-	ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
-	if (ic->i_sends == NULL) {
+	ic->i_sends = vmalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
+				   ibdev_to_node(dev));
+	if (!ic->i_sends) {
 		ret = -ENOMEM;
 		rdsdebug("send allocation failed\n");
 		goto out;
 	}
 	memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
 
-	ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
-	if (ic->i_recvs == NULL) {
+	ic->i_recvs = vmalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
+				   ibdev_to_node(dev));
+	if (!ic->i_recvs) {
 		ret = -ENOMEM;
 		rdsdebug("recv allocation failed\n");
 		goto out;
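vmalloc_node(..., ibdev_to_node(dev)) places the send and receive work rings on the NUMA node closest to the HCA instead of wherever the connecting CPU happens to run. User space has no vmalloc_node, but the intent can be sketched with libnuma (assumes the libnuma headers are installed; link with -lnuma):

    #include <numa.h>
    #include <stdio.h>

    int main(void)
    {
            size_t sz = 1024 * sizeof(long);
            void *ring;

            if (numa_available() < 0) {
                    fprintf(stderr, "no NUMA support\n");
                    return 1;
            }
            /* Analogue of vmalloc_node(sz, node): request memory on node 0,
             * as the kernel requests memory on ibdev_to_node(dev). */
            ring = numa_alloc_onnode(sz, 0);
            if (!ring)
                    return 1;
            numa_free(ring, sz);
            return 0;
    }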
@@ -352,6 +398,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 		 ic->i_send_cq, ic->i_recv_cq);
 
 out:
+	rds_ib_dev_put(rds_ibdev);
 	return ret;
 }
 
@@ -409,7 +456,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
 	struct rds_ib_connection *ic = NULL;
 	struct rdma_conn_param conn_param;
 	u32 version;
-	int err, destroy = 1;
+	int err = 1, destroy = 1;
 
 	/* Check whether the remote protocol version matches ours. */
 	version = rds_ib_protocol_compatible(event);
@@ -448,7 +495,6 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
 			/* Wait and see - our connect may still be succeeding */
 			rds_ib_stats_inc(s_ib_connect_raced);
 		}
-		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
@@ -475,24 +521,23 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
 	err = rds_ib_setup_qp(conn);
 	if (err) {
 		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
-		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
-	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version);
+	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
+		event->param.conn.responder_resources,
+		event->param.conn.initiator_depth);
 
 	/* rdma_accept() calls rdma_reject() internally if it fails */
 	err = rdma_accept(cm_id, &conn_param);
-	mutex_unlock(&conn->c_cm_lock);
-	if (err) {
+	if (err)
 		rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
-		goto out;
-	}
-
-	return 0;
 
 out:
-	rdma_reject(cm_id, NULL, 0);
+	if (conn)
+		mutex_unlock(&conn->c_cm_lock);
+	if (err)
+		rdma_reject(cm_id, NULL, 0);
 	return destroy;
 }
 
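The rewritten exit path is why err is now initialized to 1 in the earlier hunk: any goto out taken before rdma_accept() leaves err nonzero, so the single rdma_reject() at the bottom fires, and one mutex_unlock() replaces the three scattered unlocks. A user-space sketch of that single-exit pattern (the locked flag stands in for the real code's if (conn) test):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cm_lock = PTHREAD_MUTEX_INITIALIZER;

    /* err starts as "failed" so any early goto out rejects, and the one
     * unlock covers every path that took the lock. */
    static int handle_connect(int version_ok, int accept_ok)
    {
            int err = 1, destroy = 1;
            int locked = 0;

            if (!version_ok)
                    goto out;		/* reject without ever locking */

            pthread_mutex_lock(&cm_lock);
            locked = 1;

            err = accept_ok ? 0 : -1;	/* stands in for rdma_accept() */
    out:
            if (locked)
                    pthread_mutex_unlock(&cm_lock);
            if (err)
                    printf("rejecting connection\n");	/* rdma_reject() analogue */
            return destroy;
    }

    int main(void)
    {
            handle_connect(0, 0);	/* bad version: rejected before locking */
            handle_connect(1, 1);	/* accepted: no reject on the way out */
            return 0;
    }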
@@ -516,8 +561,8 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
 		goto out;
 	}
 
-	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION);
-
+	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
+		UINT_MAX, UINT_MAX);
 	ret = rdma_connect(cm_id, &conn_param);
 	if (ret)
 		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
@@ -601,9 +646,19 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
 			 ic->i_cm_id, err);
 	}
 
+	/*
+	 * We want to wait for tx and rx completion to finish
+	 * before we tear down the connection, but we have to be
+	 * careful not to get stuck waiting on a send ring that
+	 * only has unsignaled sends in it.  We've shutdown new
+	 * sends before getting here so by waiting for signaled
+	 * sends to complete we're ensured that there will be no
+	 * more tx processing.
+	 */
 	wait_event(rds_ib_ring_empty_wait,
-		   rds_ib_ring_empty(&ic->i_send_ring) &&
-		   rds_ib_ring_empty(&ic->i_recv_ring));
+		   rds_ib_ring_empty(&ic->i_recv_ring) &&
+		   (atomic_read(&ic->i_signaled_sends) == 0));
+	tasklet_kill(&ic->i_recv_tasklet);
 
 	if (ic->i_send_hdrs)
 		ib_dma_free_coherent(dev,
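Unsignaled sends never produce a completion, so waiting for the send ring to empty could block forever; shutdown now waits for the count of signaled sends to reach zero instead (i_signaled_sends is maintained on the send path, outside this file). A user-space sketch of that accounting with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int signaled_sends;

    /* Post path: only sends we asked to be signaled are counted. */
    static void post_send(int signaled)
    {
            if (signaled)
                    atomic_fetch_add(&signaled_sends, 1);
            /* ... hand the work request to the HCA ... */
    }

    /* Completion path: drop the count; at zero, teardown may proceed
     * (the kernel side would wake rds_ib_ring_empty_wait here). */
    static void send_completed(int was_signaled)
    {
            if (was_signaled)
                    atomic_fetch_sub(&signaled_sends, 1);
    }

    int main(void)
    {
            post_send(0);		/* unsignaled: never counted, never waited on */
            post_send(1);
            send_completed(1);
            printf("outstanding signaled sends: %d\n",
                   atomic_load(&signaled_sends));	/* 0: safe to tear down */
            return 0;
    }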
@@ -654,9 +709,12 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
 	BUG_ON(ic->rds_ibdev);
 
 	/* Clear pending transmit */
-	if (ic->i_rm) {
-		rds_message_put(ic->i_rm);
-		ic->i_rm = NULL;
+	if (ic->i_data_op) {
+		struct rds_message *rm;
+
+		rm = container_of(ic->i_data_op, struct rds_message, data);
+		rds_message_put(rm);
+		ic->i_data_op = NULL;
 	}
 
 	/* Clear the ACK state */
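ic->i_data_op now points at a data operation embedded inside its rds_message, so the owning message is recovered with container_of() before dropping the reference. A self-contained illustration (the demo layout stands in for the real rds_message):

    #include <stddef.h>
    #include <stdio.h>

    /* Kernel's container_of(): walk back from a member to its enclosing struct. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rm_data_op { int nents; };

    struct rds_message_demo {	/* illustrative layout, not the real rds_message */
            int refcount;
            struct rm_data_op data;
    };

    int main(void)
    {
            struct rds_message_demo rm = { .refcount = 1, .data = { .nents = 3 } };
            struct rm_data_op *op = &rm.data;

            /* Recover the message from the embedded op, as the shutdown path does. */
            struct rds_message_demo *owner =
                    container_of(op, struct rds_message_demo, data);
            printf("refcount=%d nents=%d\n", owner->refcount, owner->data.nents);
            return 0;
    }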
@@ -690,12 +748,19 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 {
 	struct rds_ib_connection *ic;
 	unsigned long flags;
+	int ret;
 
 	/* XXX too lazy? */
 	ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL);
-	if (ic == NULL)
+	if (!ic)
 		return -ENOMEM;
 
+	ret = rds_ib_recv_alloc_caches(ic);
+	if (ret) {
+		kfree(ic);
+		return ret;
+	}
+
 	INIT_LIST_HEAD(&ic->ib_node);
 	tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
 		     (unsigned long) ic);
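With the new rds_ib_recv_alloc_caches() call, rds_ib_conn_alloc() has a second failure point and must free the partially built connection; the matching rds_ib_recv_free_caches() appears in rds_ib_conn_free() in the final hunk. A minimal user-space sketch of that allocate-then-unwind pattern:

    #include <stdlib.h>

    struct conn_demo { void *caches; };

    /* Two-step construction: failure of the second step unwinds the first. */
    static struct conn_demo *conn_alloc(void)
    {
            struct conn_demo *ic = calloc(1, sizeof(*ic));
            if (!ic)
                    return NULL;

            ic->caches = malloc(64);	/* stands in for rds_ib_recv_alloc_caches() */
            if (!ic->caches) {
                    free(ic);		/* unwind: no half-built connection escapes */
                    return NULL;
            }
            return ic;
    }

    /* Teardown mirrors construction in reverse order. */
    static void conn_free(struct conn_demo *ic)
    {
            free(ic->caches);		/* rds_ib_recv_free_caches() analogue */
            free(ic);
    }

    int main(void)
    {
            struct conn_demo *c = conn_alloc();
            if (c)
                    conn_free(c);
            return 0;
    }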
@@ -703,6 +768,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&ic->i_ack_lock);
 #endif
+	atomic_set(&ic->i_signaled_sends, 0);
 
 	/*
 	 * rds_ib_conn_shutdown() waits for these to be emptied so they
@@ -744,6 +810,8 @@ void rds_ib_conn_free(void *arg)
 	list_del(&ic->ib_node);
 	spin_unlock_irq(lock_ptr);
 
+	rds_ib_recv_free_caches(ic);
+
 	kfree(ic);
 }
 