Diffstat (limited to 'drivers/infiniband/ulp/iser')
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c |   9
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h |   4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 115
3 files changed, 71 insertions, 57 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 93399dff0c6f..7b2fc98e2f2b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -325,7 +325,7 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
          */
         if (ib_conn) {
                 ib_conn->iser_conn = NULL;
-                iser_conn_put(ib_conn);
+                iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
         }
 }
 
@@ -357,11 +357,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
         /* binds the iSER connection retrieved from the previously
          * connected ep_handle to the iSCSI layer connection. exchanges
          * connection pointers */
-        iser_err("binding iscsi conn %p to iser_conn %p\n",conn,ib_conn);
+        iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
+                 conn, conn->dd_data, ib_conn);
         iser_conn = conn->dd_data;
         ib_conn->iser_conn = iser_conn;
         iser_conn->ib_conn = ib_conn;
-        iser_conn_get(ib_conn);
+        iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */
         return 0;
 }
 
@@ -382,7 +383,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
                  * There is no unbind event so the stop callback
                  * must release the ref from the bind.
                  */
-                iser_conn_put(ib_conn);
+                iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
         }
         iser_conn->ib_conn = NULL;
 }
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 036934cdcb92..f1df01567bb6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -232,6 +232,7 @@ struct iser_device {
         struct ib_cq                 *tx_cq;
         struct ib_mr                 *mr;
         struct tasklet_struct        cq_tasklet;
+        struct ib_event_handler      event_handler;
         struct list_head             ig_list; /* entry in ig devices list */
         int                          refcount;
 };
@@ -246,7 +247,6 @@ struct iser_conn {
         struct rdma_cm_id            *cma_id;         /* CMA ID */
         struct ib_qp                 *qp;             /* QP */
         struct ib_fmr_pool           *fmr_pool;       /* pool of IB FMRs */
-        int                          disc_evt_flag;   /* disconn event delivered */
         wait_queue_head_t            wait;            /* waitq for conn/disconn */
         int                          post_recv_buf_count;  /* posted rx count */
         atomic_t                     post_send_buf_count;  /* posted tx count */
@@ -320,7 +320,7 @@ void iser_conn_init(struct iser_conn *ib_conn);
 
 void iser_conn_get(struct iser_conn *ib_conn);
 
-void iser_conn_put(struct iser_conn *ib_conn);
+int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);
 
 void iser_conn_terminate(struct iser_conn *ib_conn);
 
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b89d76b39a13..9876865732f7 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -54,6 +54,13 @@ static void iser_qp_event_callback(struct ib_event *cause, void *context)
         iser_err("got qp event %d\n",cause->event);
 }
 
+static void iser_event_handler(struct ib_event_handler *handler,
+                               struct ib_event *event)
+{
+        iser_err("async event %d on device %s port %d\n", event->event,
+                 event->device->name, event->element.port_num);
+}
+
 /**
  * iser_create_device_ib_res - creates Protection Domain (PD), Completion
  * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
@@ -96,8 +103,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
         if (IS_ERR(device->mr))
                 goto dma_mr_err;
 
+        INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
+                              iser_event_handler);
+        if (ib_register_event_handler(&device->event_handler))
+                goto handler_err;
+
         return 0;
 
+handler_err:
+        ib_dereg_mr(device->mr);
 dma_mr_err:
         tasklet_kill(&device->cq_tasklet);
 cq_arm_err:
@@ -120,7 +134,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
         BUG_ON(device->mr == NULL);
 
         tasklet_kill(&device->cq_tasklet);
-
+        (void)ib_unregister_event_handler(&device->event_handler);
         (void)ib_dereg_mr(device->mr);
         (void)ib_destroy_cq(device->tx_cq);
         (void)ib_destroy_cq(device->rx_cq);
@@ -149,10 +163,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
         device = ib_conn->device;
 
         ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
-        if (!ib_conn->login_buf) {
-                goto alloc_err;
-                ret = -ENOMEM;
-        }
+        if (!ib_conn->login_buf)
+                goto out_err;
 
         ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
                                 (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
@@ -161,10 +173,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
         ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
                                     (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
                                     GFP_KERNEL);
-        if (!ib_conn->page_vec) {
-                ret = -ENOMEM;
-                goto alloc_err;
-        }
+        if (!ib_conn->page_vec)
+                goto out_err;
+
         ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
 
         params.page_shift = SHIFT_4K;
@@ -184,7 +195,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
         ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
         if (IS_ERR(ib_conn->fmr_pool)) {
                 ret = PTR_ERR(ib_conn->fmr_pool);
-                goto fmr_pool_err;
+                ib_conn->fmr_pool = NULL;
+                goto out_err;
         }
 
         memset(&init_attr, 0, sizeof init_attr);
@@ -202,7 +214,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 
         ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
         if (ret)
-                goto qp_err;
+                goto out_err;
 
         ib_conn->qp = ib_conn->cma_id->qp;
         iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
@@ -210,12 +222,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
                  ib_conn->fmr_pool, ib_conn->cma_id->qp);
         return ret;
 
-qp_err:
-        (void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
-fmr_pool_err:
-        kfree(ib_conn->page_vec);
-        kfree(ib_conn->login_buf);
-alloc_err:
+out_err:
         iser_err("unable to alloc mem or create resource, err %d\n", ret);
         return ret;
 }
@@ -224,7 +231,7 @@ alloc_err:
  * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
  * -1 on failure
  */
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 {
         BUG_ON(ib_conn == NULL);
 
@@ -239,7 +246,8 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
         if (ib_conn->qp != NULL)
                 rdma_destroy_qp(ib_conn->cma_id);
 
-        if (ib_conn->cma_id != NULL)
+        /* if cma handler context, the caller acts s.t the cma destroy the id */
+        if (ib_conn->cma_id != NULL && can_destroy_id)
                 rdma_destroy_id(ib_conn->cma_id);
 
         ib_conn->fmr_pool = NULL;
@@ -317,7 +325,7 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
 /**
  * Frees all conn objects and deallocs conn descriptor
  */
-static void iser_conn_release(struct iser_conn *ib_conn)
+static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
 {
         struct iser_device *device = ib_conn->device;
 
@@ -327,13 +335,11 @@ static void iser_conn_release(struct iser_conn *ib_conn)
         list_del(&ib_conn->conn_list);
         mutex_unlock(&ig.connlist_mutex);
         iser_free_rx_descriptors(ib_conn);
-        iser_free_ib_conn_res(ib_conn);
+        iser_free_ib_conn_res(ib_conn, can_destroy_id);
         ib_conn->device = NULL;
         /* on EVENT_ADDR_ERROR there's no device yet for this conn */
         if (device != NULL)
                 iser_device_try_release(device);
-        if (ib_conn->iser_conn)
-                ib_conn->iser_conn->ib_conn = NULL;
         iscsi_destroy_endpoint(ib_conn->ep);
 }
 
@@ -342,10 +348,13 @@ void iser_conn_get(struct iser_conn *ib_conn)
         atomic_inc(&ib_conn->refcount);
 }
 
-void iser_conn_put(struct iser_conn *ib_conn)
+int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
 {
-        if (atomic_dec_and_test(&ib_conn->refcount))
-                iser_conn_release(ib_conn);
+        if (atomic_dec_and_test(&ib_conn->refcount)) {
+                iser_conn_release(ib_conn, can_destroy_id);
+                return 1;
+        }
+        return 0;
 }
 
 /**
@@ -369,19 +378,20 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
         wait_event_interruptible(ib_conn->wait,
                                  ib_conn->state == ISER_CONN_DOWN);
 
-        iser_conn_put(ib_conn);
+        iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
 }
 
-static void iser_connect_error(struct rdma_cm_id *cma_id)
+static int iser_connect_error(struct rdma_cm_id *cma_id)
 {
         struct iser_conn *ib_conn;
         ib_conn = (struct iser_conn *)cma_id->context;
 
         ib_conn->state = ISER_CONN_DOWN;
         wake_up_interruptible(&ib_conn->wait);
+        return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
 }
 
-static void iser_addr_handler(struct rdma_cm_id *cma_id)
+static int iser_addr_handler(struct rdma_cm_id *cma_id)
 {
         struct iser_device *device;
         struct iser_conn   *ib_conn;
@@ -390,8 +400,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
         device = iser_device_find_by_ib_device(cma_id);
         if (!device) {
                 iser_err("device lookup/creation failed\n");
-                iser_connect_error(cma_id);
-                return;
+                return iser_connect_error(cma_id);
         }
 
         ib_conn = (struct iser_conn *)cma_id->context;
@@ -400,11 +409,13 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
         ret = rdma_resolve_route(cma_id, 1000);
         if (ret) {
                 iser_err("resolve route failed: %d\n", ret);
-                iser_connect_error(cma_id);
+                return iser_connect_error(cma_id);
         }
+
+        return 0;
 }
 
-static void iser_route_handler(struct rdma_cm_id *cma_id)
+static int iser_route_handler(struct rdma_cm_id *cma_id)
 {
         struct rdma_conn_param conn_param;
         int    ret;
@@ -425,9 +436,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
                 goto failure;
         }
 
-        return;
+        return 0;
 failure:
-        iser_connect_error(cma_id);
+        return iser_connect_error(cma_id);
 }
 
 static void iser_connected_handler(struct rdma_cm_id *cma_id)
@@ -439,12 +450,12 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
         wake_up_interruptible(&ib_conn->wait);
 }
 
-static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
+static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
 {
         struct iser_conn *ib_conn;
+        int ret;
 
         ib_conn = (struct iser_conn *)cma_id->context;
-        ib_conn->disc_evt_flag = 1;
 
         /* getting here when the state is UP means that the conn is being *
          * terminated asynchronously from the iSCSI layer's perspective. */
@@ -459,20 +470,24 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
                 ib_conn->state = ISER_CONN_DOWN;
                 wake_up_interruptible(&ib_conn->wait);
         }
+
+        ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
+        return ret;
 }
 
 static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
         int ret = 0;
 
-        iser_err("event %d conn %p id %p\n",event->event,cma_id->context,cma_id);
+        iser_err("event %d status %d conn %p id %p\n",
+                 event->event, event->status, cma_id->context, cma_id);
 
         switch (event->event) {
         case RDMA_CM_EVENT_ADDR_RESOLVED:
-                iser_addr_handler(cma_id);
+                ret = iser_addr_handler(cma_id);
                 break;
         case RDMA_CM_EVENT_ROUTE_RESOLVED:
-                iser_route_handler(cma_id);
+                ret = iser_route_handler(cma_id);
                 break;
         case RDMA_CM_EVENT_ESTABLISHED:
                 iser_connected_handler(cma_id);
@@ -482,13 +497,12 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
         case RDMA_CM_EVENT_CONNECT_ERROR:
         case RDMA_CM_EVENT_UNREACHABLE:
         case RDMA_CM_EVENT_REJECTED:
-                iser_err("event: %d, error: %d\n", event->event, event->status);
-                iser_connect_error(cma_id);
+                ret = iser_connect_error(cma_id);
                 break;
         case RDMA_CM_EVENT_DISCONNECTED:
         case RDMA_CM_EVENT_DEVICE_REMOVAL:
         case RDMA_CM_EVENT_ADDR_CHANGE:
-                iser_disconnected_handler(cma_id);
+                ret = iser_disconnected_handler(cma_id);
                 break;
         default:
                 iser_err("Unexpected RDMA CM event (%d)\n", event->event);
@@ -503,7 +517,7 @@ void iser_conn_init(struct iser_conn *ib_conn)
         init_waitqueue_head(&ib_conn->wait);
         ib_conn->post_recv_buf_count = 0;
         atomic_set(&ib_conn->post_send_buf_count, 0);
-        atomic_set(&ib_conn->refcount, 1);
+        atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
         INIT_LIST_HEAD(&ib_conn->conn_list);
         spin_lock_init(&ib_conn->lock);
 }
@@ -531,6 +545,7 @@ int iser_connect(struct iser_conn *ib_conn,
 
         ib_conn->state = ISER_CONN_PENDING;
 
+        iser_conn_get(ib_conn); /* ref ib conn's cma id */
         ib_conn->cma_id = rdma_create_id(iser_cma_handler,
                                          (void *)ib_conn,
                                          RDMA_PS_TCP);
@@ -568,7 +583,7 @@ id_failure:
 addr_failure:
         ib_conn->state = ISER_CONN_DOWN;
 connect_failure:
-        iser_conn_release(ib_conn);
+        iser_conn_release(ib_conn, 1);
         return err;
 }
 
@@ -737,12 +752,10 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
                 iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
                                    ISCSI_ERR_CONN_FAILED);
 
-                /* complete the termination process if disconnect event was delivered *
-                 * note there are no more non completed posts to the QP */
-                if (ib_conn->disc_evt_flag) {
-                        ib_conn->state = ISER_CONN_DOWN;
-                        wake_up_interruptible(&ib_conn->wait);
-                }
+                /* no more non completed posts to the QP, complete the
+                 * termination process w.o worrying on disconnect event */
+                ib_conn->state = ISER_CONN_DOWN;
+                wake_up_interruptible(&ib_conn->wait);
         }
 }
 