Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 314
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h |  11
2 files changed, 191 insertions, 134 deletions
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 922d845f76b0..d5088ce78290 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -222,27 +222,29 @@ static int srp_new_cm_id(struct srp_target_port *target)
 static int srp_create_target_ib(struct srp_target_port *target)
 {
 	struct ib_qp_init_attr *init_attr;
+	struct ib_cq *recv_cq, *send_cq;
+	struct ib_qp *qp;
 	int ret;
 
 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
 	if (!init_attr)
 		return -ENOMEM;
 
-	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
-				       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
-	if (IS_ERR(target->recv_cq)) {
-		ret = PTR_ERR(target->recv_cq);
+	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+			       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
+	if (IS_ERR(recv_cq)) {
+		ret = PTR_ERR(recv_cq);
 		goto err;
 	}
 
-	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
-				       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
-	if (IS_ERR(target->send_cq)) {
-		ret = PTR_ERR(target->send_cq);
+	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+			       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
+	if (IS_ERR(send_cq)) {
+		ret = PTR_ERR(send_cq);
 		goto err_recv_cq;
 	}
 
-	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
+	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
 
 	init_attr->event_handler = srp_qp_event;
 	init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -251,30 +253,41 @@ static int srp_create_target_ib(struct srp_target_port *target)
 	init_attr->cap.max_send_sge = 1;
 	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
 	init_attr->qp_type = IB_QPT_RC;
-	init_attr->send_cq = target->send_cq;
-	init_attr->recv_cq = target->recv_cq;
+	init_attr->send_cq = send_cq;
+	init_attr->recv_cq = recv_cq;
 
-	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
-	if (IS_ERR(target->qp)) {
-		ret = PTR_ERR(target->qp);
+	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
+	if (IS_ERR(qp)) {
+		ret = PTR_ERR(qp);
 		goto err_send_cq;
 	}
 
-	ret = srp_init_qp(target, target->qp);
+	ret = srp_init_qp(target, qp);
 	if (ret)
 		goto err_qp;
 
+	if (target->qp)
+		ib_destroy_qp(target->qp);
+	if (target->recv_cq)
+		ib_destroy_cq(target->recv_cq);
+	if (target->send_cq)
+		ib_destroy_cq(target->send_cq);
+
+	target->qp = qp;
+	target->recv_cq = recv_cq;
+	target->send_cq = send_cq;
+
 	kfree(init_attr);
 	return 0;
 
 err_qp:
-	ib_destroy_qp(target->qp);
+	ib_destroy_qp(qp);
 
 err_send_cq:
-	ib_destroy_cq(target->send_cq);
+	ib_destroy_cq(send_cq);
 
 err_recv_cq:
-	ib_destroy_cq(target->recv_cq);
+	ib_destroy_cq(recv_cq);
 
 err:
 	kfree(init_attr);
@@ -289,6 +302,9 @@ static void srp_free_target_ib(struct srp_target_port *target)
 	ib_destroy_cq(target->send_cq);
 	ib_destroy_cq(target->recv_cq);
 
+	target->qp = NULL;
+	target->send_cq = target->recv_cq = NULL;
+
 	for (i = 0; i < SRP_RQ_SIZE; ++i)
 		srp_free_iu(target->srp_host, target->rx_ring[i]);
 	for (i = 0; i < SRP_SQ_SIZE; ++i)
@@ -428,34 +444,50 @@ static int srp_send_req(struct srp_target_port *target)
 	return status;
 }
 
-static void srp_disconnect_target(struct srp_target_port *target)
+static bool srp_queue_remove_work(struct srp_target_port *target)
 {
-	/* XXX should send SRP_I_LOGOUT request */
+	bool changed = false;
 
-	init_completion(&target->done);
-	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
-		shost_printk(KERN_DEBUG, target->scsi_host,
-			     PFX "Sending CM DREQ failed\n");
-		return;
+	spin_lock_irq(&target->lock);
+	if (target->state != SRP_TARGET_REMOVED) {
+		target->state = SRP_TARGET_REMOVED;
+		changed = true;
 	}
-	wait_for_completion(&target->done);
+	spin_unlock_irq(&target->lock);
+
+	if (changed)
+		queue_work(system_long_wq, &target->remove_work);
+
+	return changed;
 }
 
-static bool srp_change_state(struct srp_target_port *target,
-			     enum srp_target_state old,
-			     enum srp_target_state new)
+static bool srp_change_conn_state(struct srp_target_port *target,
+				  bool connected)
 {
 	bool changed = false;
 
 	spin_lock_irq(&target->lock);
-	if (target->state == old) {
-		target->state = new;
+	if (target->connected != connected) {
+		target->connected = connected;
 		changed = true;
 	}
 	spin_unlock_irq(&target->lock);
+
 	return changed;
 }
 
+static void srp_disconnect_target(struct srp_target_port *target)
+{
+	if (srp_change_conn_state(target, false)) {
+		/* XXX should send SRP_I_LOGOUT request */
+
+		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+			shost_printk(KERN_DEBUG, target->scsi_host,
+				     PFX "Sending CM DREQ failed\n");
+		}
+	}
+}
+
 static void srp_free_req_data(struct srp_target_port *target)
 {
 	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
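Note: the two helpers added above, srp_queue_remove_work() and srp_change_conn_state(), share one idiom: test and update the target state under target->lock, and return whether this caller actually performed the transition, so the follow-up action (queueing the remove work, or sending the DREQ) runs exactly once no matter how many contexts race. A minimal userspace sketch of the idiom, with illustrative names rather than driver API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum tgt_state { TGT_LIVE, TGT_REMOVED };

struct tgt {
	pthread_mutex_t lock;
	enum tgt_state state;
};

/* Returns true only for the caller that performed the transition. */
static bool tgt_queue_remove(struct tgt *t)
{
	bool changed = false;

	pthread_mutex_lock(&t->lock);
	if (t->state != TGT_REMOVED) {
		t->state = TGT_REMOVED;
		changed = true;
	}
	pthread_mutex_unlock(&t->lock);

	if (changed)
		printf("queueing remove work\n");	/* queue_work() stand-in */
	return changed;
}

int main(void)
{
	struct tgt t = { PTHREAD_MUTEX_INITIALIZER, TGT_LIVE };

	tgt_queue_remove(&t);	/* performs the transition, queues the work */
	tgt_queue_remove(&t);	/* no-op: state is already TGT_REMOVED */
	return 0;
}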
@@ -489,32 +521,50 @@ static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
 		device_remove_file(&shost->shost_dev, *attr);
 }
 
-static void srp_remove_work(struct work_struct *work)
+static void srp_remove_target(struct srp_target_port *target)
 {
-	struct srp_target_port *target =
-		container_of(work, struct srp_target_port, work);
-
-	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
-		return;
-
-	spin_lock(&target->srp_host->target_lock);
-	list_del(&target->list);
-	spin_unlock(&target->srp_host->target_lock);
+	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
 
 	srp_del_scsi_host_attr(target->scsi_host);
 	srp_remove_host(target->scsi_host);
 	scsi_remove_host(target->scsi_host);
+	srp_disconnect_target(target);
 	ib_destroy_cm_id(target->cm_id);
 	srp_free_target_ib(target);
 	srp_free_req_data(target);
 	scsi_host_put(target->scsi_host);
 }
 
+static void srp_remove_work(struct work_struct *work)
+{
+	struct srp_target_port *target =
+		container_of(work, struct srp_target_port, remove_work);
+
+	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
+
+	spin_lock(&target->srp_host->target_lock);
+	list_del(&target->list);
+	spin_unlock(&target->srp_host->target_lock);
+
+	srp_remove_target(target);
+}
+
+static void srp_rport_delete(struct srp_rport *rport)
+{
+	struct srp_target_port *target = rport->lld_data;
+
+	srp_queue_remove_work(target);
+}
+
 static int srp_connect_target(struct srp_target_port *target)
 {
 	int retries = 3;
 	int ret;
 
+	WARN_ON_ONCE(target->connected);
+
+	target->qp_in_error = false;
+
 	ret = srp_lookup_path(target);
 	if (ret)
 		return ret;
@@ -534,6 +584,7 @@ static int srp_connect_target(struct srp_target_port *target)
 	 */
 	switch (target->status) {
 	case 0:
+		srp_change_conn_state(target, true);
 		return 0;
 
 	case SRP_PORT_REDIRECT:
@@ -646,13 +697,14 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 
 static int srp_reconnect_target(struct srp_target_port *target)
 {
-	struct ib_qp_attr qp_attr;
-	struct ib_wc wc;
+	struct Scsi_Host *shost = target->scsi_host;
 	int i, ret;
 
-	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
+	if (target->state != SRP_TARGET_LIVE)
 		return -EAGAIN;
 
+	scsi_target_block(&shost->shost_gendev);
+
 	srp_disconnect_target(target);
 	/*
 	 * Now get a new local CM ID so that we avoid confusing the
@@ -660,21 +712,11 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	 */
 	ret = srp_new_cm_id(target);
 	if (ret)
-		goto err;
+		goto unblock;
 
-	qp_attr.qp_state = IB_QPS_RESET;
-	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
-	if (ret)
-		goto err;
-
-	ret = srp_init_qp(target, target->qp);
+	ret = srp_create_target_ib(target);
 	if (ret)
-		goto err;
-
-	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
-		; /* nothing */
-	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
-		; /* nothing */
+		goto unblock;
 
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		struct srp_request *req = &target->req_ring[i];
@@ -686,13 +728,16 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	for (i = 0; i < SRP_SQ_SIZE; ++i)
 		list_add(&target->tx_ring[i]->list, &target->free_tx);
 
-	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
+
+unblock:
+	scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
+			    SDEV_TRANSPORT_OFFLINE);
+
 	if (ret)
 		goto err;
 
-	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
-		ret = -EAGAIN;
+	shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");
 
 	return ret;
 
@@ -705,17 +750,8 @@ err:
 	 * However, we have to defer the real removal because we
 	 * are in the context of the SCSI error handler now, which
 	 * will deadlock if we call scsi_remove_host().
-	 *
-	 * Schedule our work inside the lock to avoid a race with
-	 * the flush_scheduled_work() in srp_remove_one().
 	 */
-	spin_lock_irq(&target->lock);
-	if (target->state == SRP_TARGET_CONNECTING) {
-		target->state = SRP_TARGET_DEAD;
-		INIT_WORK(&target->work, srp_remove_work);
-		queue_work(ib_wq, &target->work);
-	}
-	spin_unlock_irq(&target->lock);
+	srp_queue_remove_work(target);
 
 	return ret;
 }
@@ -1262,6 +1298,19 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 			     PFX "Recv failed with error code %d\n", res);
 }
 
+static void srp_handle_qp_err(enum ib_wc_status wc_status,
+			      enum ib_wc_opcode wc_opcode,
+			      struct srp_target_port *target)
+{
+	if (target->connected && !target->qp_in_error) {
+		shost_printk(KERN_ERR, target->scsi_host,
+			     PFX "failed %s status %d\n",
+			     wc_opcode & IB_WC_RECV ? "receive" : "send",
+			     wc_status);
+	}
+	target->qp_in_error = true;
+}
+
 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
 {
 	struct srp_target_port *target = target_ptr;
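Note: srp_handle_qp_err() reports only the first failed completion. Once a QP has entered the error state, every outstanding work request completes with a flush error, so an unguarded handler would log one message per posted request. A standalone mock of that guard (illustrative names, not the driver API):

#include <stdbool.h>
#include <stdio.h>

struct mock_target { bool connected; bool qp_in_error; };

static void handle_qp_err(struct mock_target *t, int wc_status)
{
	if (t->connected && !t->qp_in_error)
		printf("failed completion, status %d\n", wc_status);
	t->qp_in_error = true;
}

int main(void)
{
	struct mock_target t = { .connected = true };

	/* A QP in the error state flushes every outstanding work request
	 * with an error completion; only the first one gets logged. */
	for (int i = 0; i < 5; i++)
		handle_qp_err(&t, 5 /* IB_WC_WR_FLUSH_ERR */);
	return 0;
}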
@@ -1269,15 +1318,11 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
 
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		if (wc.status) {
-			shost_printk(KERN_ERR, target->scsi_host,
-				     PFX "failed receive status %d\n",
-				     wc.status);
-			target->qp_in_error = 1;
-			break;
+		if (likely(wc.status == IB_WC_SUCCESS)) {
+			srp_handle_recv(target, &wc);
+		} else {
+			srp_handle_qp_err(wc.status, wc.opcode, target);
 		}
-
-		srp_handle_recv(target, &wc);
 	}
 }
 
@@ -1288,16 +1333,12 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 	struct srp_iu *iu;
 
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		if (wc.status) {
-			shost_printk(KERN_ERR, target->scsi_host,
-				     PFX "failed send status %d\n",
-				     wc.status);
-			target->qp_in_error = 1;
-			break;
+		if (likely(wc.status == IB_WC_SUCCESS)) {
+			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
+			list_add(&iu->list, &target->free_tx);
+		} else {
+			srp_handle_qp_err(wc.status, wc.opcode, target);
 		}
-
-		iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
-		list_add(&iu->list, &target->free_tx);
 	}
 }
 
@@ -1311,16 +1352,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	unsigned long flags;
 	int len;
 
-	if (target->state == SRP_TARGET_CONNECTING)
-		goto err;
-
-	if (target->state == SRP_TARGET_DEAD ||
-	    target->state == SRP_TARGET_REMOVED) {
-		scmnd->result = DID_BAD_TARGET << 16;
-		scmnd->scsi_done(scmnd);
-		return 0;
-	}
-
 	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
 	if (!iu)
@@ -1377,7 +1408,6 @@ err_iu:
 err_unlock:
 	spin_unlock_irqrestore(&target->lock, flags);
 
-err:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
@@ -1419,6 +1449,33 @@ err:
 	return -ENOMEM;
 }
 
+static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
+{
+	uint64_t T_tr_ns, max_compl_time_ms;
+	uint32_t rq_tmo_jiffies;
+
+	/*
+	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
+	 * table 91), both the QP timeout and the retry count have to be set
+	 * for RC QP's during the RTR to RTS transition.
+	 */
+	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
+		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
+
+	/*
+	 * Set target->rq_tmo_jiffies to one second more than the largest time
+	 * it can take before an error completion is generated. See also
+	 * C9-140..142 in the IBTA spec for more information about how to
+	 * convert the QP Local ACK Timeout value to nanoseconds.
+	 */
+	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
+	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
+	do_div(max_compl_time_ms, NSEC_PER_MSEC);
+	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
+
+	return rq_tmo_jiffies;
+}
+
 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
 			       struct srp_login_rsp *lrsp,
 			       struct srp_target_port *target)
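Note: the conversion in srp_compute_rq_tmo() can be sanity-checked with example values (assumed here, not taken from the patch): for a Local ACK Timeout of 19 and a retry count of 7, T_tr = 4096 ns * 2^19 ≈ 2.15 s, the worst-case completion delay is 7 * 4 * T_tr ≈ 60.1 s, and the resulting request-queue timeout is about 61.1 s. The same arithmetic as standalone C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned timeout = 19, retry_cnt = 7;	/* example QP attributes */

	/* C9-140..142: the Local ACK Timeout unit is 4.096 us * 2^timeout. */
	uint64_t T_tr_ns = 4096 * (1ULL << timeout);
	uint64_t max_compl_time_ms = retry_cnt * 4 * T_tr_ns / 1000000;

	printf("T_tr = %llu ns, worst case = %llu ms, rq_tmo = %llu ms\n",
	       (unsigned long long)T_tr_ns,
	       (unsigned long long)max_compl_time_ms,
	       (unsigned long long)(max_compl_time_ms + 1000));
	return 0;
}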
@@ -1478,6 +1535,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
 	if (ret)
 		goto error_free;
 
+	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+
 	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
 	if (ret)
 		goto error_free;
@@ -1599,6 +1658,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 	case IB_CM_DREQ_RECEIVED:
 		shost_printk(KERN_WARNING, target->scsi_host,
 			     PFX "DREQ received - connection closed\n");
+		srp_change_conn_state(target, false);
 		if (ib_send_cm_drep(cm_id, NULL, 0))
 			shost_printk(KERN_ERR, target->scsi_host,
 				     PFX "Sending CM DREP failed\n");
@@ -1608,7 +1668,6 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "connection closed\n");
 
-		comp = 1;
 		target->status = 0;
 		break;
 
@@ -1636,10 +1695,6 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
 
-	if (target->state == SRP_TARGET_DEAD ||
-	    target->state == SRP_TARGET_REMOVED)
-		return -1;
-
 	init_completion(&target->tsk_mgmt_done);
 
 	spin_lock_irq(&target->lock);
@@ -1729,6 +1784,21 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
 	return ret;
 }
 
+static int srp_slave_configure(struct scsi_device *sdev)
+{
+	struct Scsi_Host *shost = sdev->host;
+	struct srp_target_port *target = host_to_target(shost);
+	struct request_queue *q = sdev->request_queue;
+	unsigned long timeout;
+
+	if (sdev->type == TYPE_DISK) {
+		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
+		blk_queue_rq_timeout(q, timeout);
+	}
+
+	return 0;
+}
+
 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
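Note: the point of srp_slave_configure() is to keep the block-layer request timeout at least as large as the worst-case transport delay, so the HCA exhausts its retries before the SCSI midlayer times a request out. A sketch with assumed numbers (HZ and the rq_tmo value are examples, not from a real run):

#include <stdio.h>

#define HZ 250	/* example tick rate; kernel-config dependent */

int main(void)
{
	unsigned long rq_tmo_jiffies = 61129 * HZ / 1000;	/* ~61 s, cf. srp_compute_rq_tmo() */
	unsigned long timeout = 30 * HZ;	/* default 30 s SCSI disk timeout */

	/* max_t(unsigned, 30 * HZ, rq_tmo_jiffies) */
	if (rq_tmo_jiffies > timeout)
		timeout = rq_tmo_jiffies;

	printf("request timeout = %lu jiffies (~%lu s)\n", timeout, timeout / HZ);
	return 0;
}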
@@ -1861,6 +1931,7 @@ static struct scsi_host_template srp_template = {
 	.module = THIS_MODULE,
 	.name = "InfiniBand SRP initiator",
 	.proc_name = DRV_NAME,
+	.slave_configure = srp_slave_configure,
 	.info = srp_target_info,
 	.queuecommand = srp_queuecommand,
 	.eh_abort_handler = srp_abort,
@@ -1894,11 +1965,14 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
 		return PTR_ERR(rport);
 	}
 
+	rport->lld_data = target;
+
 	spin_lock(&host->target_lock);
 	list_add_tail(&target->list, &host->target_list);
 	spin_unlock(&host->target_lock);
 
 	target->state = SRP_TARGET_LIVE;
+	target->connected = false;
 
 	scsi_scan_target(&target->scsi_host->shost_gendev,
 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
@@ -2188,6 +2262,7 @@ static ssize_t srp_create_target(struct device *dev,
 		sizeof (struct srp_indirect_buf) +
 		target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
 
+	INIT_WORK(&target->remove_work, srp_remove_work);
 	spin_lock_init(&target->lock);
 	INIT_LIST_HEAD(&target->free_tx);
 	INIT_LIST_HEAD(&target->free_reqs);
@@ -2232,7 +2307,6 @@ static ssize_t srp_create_target(struct device *dev,
 	if (ret)
 		goto err_free_ib;
 
-	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
 	if (ret) {
 		shost_printk(KERN_ERR, target->scsi_host,
@@ -2422,8 +2496,7 @@ static void srp_remove_one(struct ib_device *device)
 {
 	struct srp_device *srp_dev;
 	struct srp_host *host, *tmp_host;
-	LIST_HEAD(target_list);
-	struct srp_target_port *target, *tmp_target;
+	struct srp_target_port *target;
 
 	srp_dev = ib_get_client_data(device, &srp_client);
 
@@ -2436,35 +2509,17 @@ static void srp_remove_one(struct ib_device *device)
 		wait_for_completion(&host->released);
 
 		/*
-		 * Mark all target ports as removed, so we stop queueing
-		 * commands and don't try to reconnect.
+		 * Remove all target ports.
 		 */
 		spin_lock(&host->target_lock);
-		list_for_each_entry(target, &host->target_list, list) {
-			spin_lock_irq(&target->lock);
-			target->state = SRP_TARGET_REMOVED;
-			spin_unlock_irq(&target->lock);
-		}
+		list_for_each_entry(target, &host->target_list, list)
+			srp_queue_remove_work(target);
 		spin_unlock(&host->target_lock);
 
 		/*
-		 * Wait for any reconnection tasks that may have
-		 * started before we marked our target ports as
-		 * removed, and any target port removal tasks.
+		 * Wait for target port removal tasks.
 		 */
-		flush_workqueue(ib_wq);
-
-		list_for_each_entry_safe(target, tmp_target,
-					 &host->target_list, list) {
-			srp_del_scsi_host_attr(target->scsi_host);
-			srp_remove_host(target->scsi_host);
-			scsi_remove_host(target->scsi_host);
-			srp_disconnect_target(target);
-			ib_destroy_cm_id(target->cm_id);
-			srp_free_target_ib(target);
-			srp_free_req_data(target);
-			scsi_host_put(target->scsi_host);
-		}
+		flush_workqueue(system_long_wq);
 
 		kfree(host);
 	}
@@ -2478,6 +2533,7 @@ static void srp_remove_one(struct ib_device *device)
 }
 
 static struct srp_function_template ib_srp_transport_functions = {
+	.rport_delete = srp_rport_delete,
 };
 
 static int __init srp_init_module(void)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 020caf0c3789..de2d0b3c0bfe 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -80,9 +80,7 @@ enum {
 
 enum srp_target_state {
 	SRP_TARGET_LIVE,
-	SRP_TARGET_CONNECTING,
-	SRP_TARGET_DEAD,
-	SRP_TARGET_REMOVED
+	SRP_TARGET_REMOVED,
 };
 
 enum srp_iu_type {
@@ -163,6 +161,9 @@ struct srp_target_port {
 	struct ib_sa_query *path_query;
 	int path_query_id;
 
+	u32 rq_tmo_jiffies;
+	bool connected;
+
 	struct ib_cm_id *cm_id;
 
 	int max_ti_iu_len;
@@ -173,12 +174,12 @@ struct srp_target_port {
 	struct srp_iu *rx_ring[SRP_RQ_SIZE];
 	struct srp_request req_ring[SRP_CMD_SQ_SIZE];
 
-	struct work_struct work;
+	struct work_struct remove_work;
 
 	struct list_head list;
 	struct completion done;
 	int status;
-	int qp_in_error;
+	bool qp_in_error;
 
 	struct completion tsk_mgmt_done;
 	u8 tsk_mgmt_status;