path: root/drivers/infiniband/ulp/srp
author     Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/infiniband/ulp/srp
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/infiniband/ulp/srp')
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  499
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h   11
2 files changed, 212 insertions, 298 deletions
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index d5088ce7829..0bfa545675b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -30,8 +30,6 @@
  * SOFTWARE.
  */
 
-#define pr_fmt(fmt) PFX fmt
-
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -167,7 +165,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
 
 static void srp_qp_event(struct ib_event *event, void *context)
 {
-	pr_debug("QP event %d\n", event->event);
+	printk(KERN_ERR PFX "QP event %d\n", event->event);
 }
 
 static int srp_init_qp(struct srp_target_port *target,
@@ -222,29 +220,27 @@ static int srp_new_cm_id(struct srp_target_port *target)
 static int srp_create_target_ib(struct srp_target_port *target)
 {
 	struct ib_qp_init_attr *init_attr;
-	struct ib_cq *recv_cq, *send_cq;
-	struct ib_qp *qp;
 	int ret;
 
 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
 	if (!init_attr)
 		return -ENOMEM;
 
-	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
 			       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
-	if (IS_ERR(recv_cq)) {
-		ret = PTR_ERR(recv_cq);
+	if (IS_ERR(target->recv_cq)) {
+		ret = PTR_ERR(target->recv_cq);
 		goto err;
 	}
 
-	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
 			       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
-	if (IS_ERR(send_cq)) {
-		ret = PTR_ERR(send_cq);
+	if (IS_ERR(target->send_cq)) {
+		ret = PTR_ERR(target->send_cq);
 		goto err_recv_cq;
 	}
 
-	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
+	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
 
 	init_attr->event_handler = srp_qp_event;
 	init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -253,41 +249,30 @@ static int srp_create_target_ib(struct srp_target_port *target)
 	init_attr->cap.max_send_sge = 1;
 	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
 	init_attr->qp_type = IB_QPT_RC;
-	init_attr->send_cq = send_cq;
-	init_attr->recv_cq = recv_cq;
+	init_attr->send_cq = target->send_cq;
+	init_attr->recv_cq = target->recv_cq;
 
-	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
-	if (IS_ERR(qp)) {
-		ret = PTR_ERR(qp);
+	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
+	if (IS_ERR(target->qp)) {
+		ret = PTR_ERR(target->qp);
 		goto err_send_cq;
 	}
 
-	ret = srp_init_qp(target, qp);
+	ret = srp_init_qp(target, target->qp);
 	if (ret)
 		goto err_qp;
 
-	if (target->qp)
-		ib_destroy_qp(target->qp);
-	if (target->recv_cq)
-		ib_destroy_cq(target->recv_cq);
-	if (target->send_cq)
-		ib_destroy_cq(target->send_cq);
-
-	target->qp = qp;
-	target->recv_cq = recv_cq;
-	target->send_cq = send_cq;
-
 	kfree(init_attr);
 	return 0;
 
 err_qp:
-	ib_destroy_qp(qp);
+	ib_destroy_qp(target->qp);
 
 err_send_cq:
-	ib_destroy_cq(send_cq);
+	ib_destroy_cq(target->send_cq);
 
 err_recv_cq:
-	ib_destroy_cq(recv_cq);
+	ib_destroy_cq(target->recv_cq);
 
 err:
 	kfree(init_attr);
@@ -302,9 +287,6 @@ static void srp_free_target_ib(struct srp_target_port *target)
 	ib_destroy_cq(target->send_cq);
 	ib_destroy_cq(target->recv_cq);
 
-	target->qp = NULL;
-	target->send_cq = target->recv_cq = NULL;
-
 	for (i = 0; i < SRP_RQ_SIZE; ++i)
 		srp_free_iu(target->srp_host, target->rx_ring[i]);
 	for (i = 0; i < SRP_SQ_SIZE; ++i)
@@ -444,50 +426,34 @@ static int srp_send_req(struct srp_target_port *target)
 	return status;
 }
 
-static bool srp_queue_remove_work(struct srp_target_port *target)
+static void srp_disconnect_target(struct srp_target_port *target)
 {
-	bool changed = false;
+	/* XXX should send SRP_I_LOGOUT request */
 
-	spin_lock_irq(&target->lock);
-	if (target->state != SRP_TARGET_REMOVED) {
-		target->state = SRP_TARGET_REMOVED;
-		changed = true;
+	init_completion(&target->done);
+	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+		shost_printk(KERN_DEBUG, target->scsi_host,
+			     PFX "Sending CM DREQ failed\n");
+		return;
 	}
-	spin_unlock_irq(&target->lock);
-
-	if (changed)
-		queue_work(system_long_wq, &target->remove_work);
-
-	return changed;
+	wait_for_completion(&target->done);
 }
 
-static bool srp_change_conn_state(struct srp_target_port *target,
-				  bool connected)
+static bool srp_change_state(struct srp_target_port *target,
+			     enum srp_target_state old,
+			     enum srp_target_state new)
 {
 	bool changed = false;
 
 	spin_lock_irq(&target->lock);
-	if (target->connected != connected) {
-		target->connected = connected;
+	if (target->state == old) {
+		target->state = new;
		changed = true;
 	}
 	spin_unlock_irq(&target->lock);
-
 	return changed;
 }
 
-static void srp_disconnect_target(struct srp_target_port *target)
-{
-	if (srp_change_conn_state(target, false)) {
-		/* XXX should send SRP_I_LOGOUT request */
-
-		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
-			shost_printk(KERN_DEBUG, target->scsi_host,
-				     PFX "Sending CM DREQ failed\n");
-		}
-	}
-}
-
 static void srp_free_req_data(struct srp_target_port *target)
 {
 	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
@@ -506,65 +472,31 @@ static void srp_free_req_data(struct srp_target_port *target)
 	}
 }
 
-/**
- * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
- * @shost: SCSI host whose attributes to remove from sysfs.
- *
- * Note: Any attributes defined in the host template and that did not exist
- * before invocation of this function will be ignored.
- */
-static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
+static void srp_remove_work(struct work_struct *work)
 {
-	struct device_attribute **attr;
+	struct srp_target_port *target =
+		container_of(work, struct srp_target_port, work);
 
-	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
-		device_remove_file(&shost->shost_dev, *attr);
-}
+	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
		return;
 
-static void srp_remove_target(struct srp_target_port *target)
-{
-	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
+	spin_lock(&target->srp_host->target_lock);
+	list_del(&target->list);
+	spin_unlock(&target->srp_host->target_lock);
 
-	srp_del_scsi_host_attr(target->scsi_host);
 	srp_remove_host(target->scsi_host);
 	scsi_remove_host(target->scsi_host);
-	srp_disconnect_target(target);
 	ib_destroy_cm_id(target->cm_id);
 	srp_free_target_ib(target);
 	srp_free_req_data(target);
 	scsi_host_put(target->scsi_host);
 }
 
-static void srp_remove_work(struct work_struct *work)
-{
-	struct srp_target_port *target =
-		container_of(work, struct srp_target_port, remove_work);
-
-	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
-
-	spin_lock(&target->srp_host->target_lock);
-	list_del(&target->list);
-	spin_unlock(&target->srp_host->target_lock);
-
-	srp_remove_target(target);
-}
-
-static void srp_rport_delete(struct srp_rport *rport)
-{
-	struct srp_target_port *target = rport->lld_data;
-
-	srp_queue_remove_work(target);
-}
-
 static int srp_connect_target(struct srp_target_port *target)
 {
 	int retries = 3;
 	int ret;
 
-	WARN_ON_ONCE(target->connected);
-
-	target->qp_in_error = false;
-
 	ret = srp_lookup_path(target);
 	if (ret)
 		return ret;
@@ -584,7 +516,6 @@ static int srp_connect_target(struct srp_target_port *target)
 	 */
 	switch (target->status) {
 	case 0:
-		srp_change_conn_state(target, true);
 		return 0;
 
 	case SRP_PORT_REDIRECT:
@@ -637,74 +568,35 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 			scmnd->sc_data_direction);
 }
 
-/**
- * srp_claim_req - Take ownership of the scmnd associated with a request.
- * @target: SRP target port.
- * @req: SRP request.
- * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
- * ownership of @req->scmnd if it equals @scmnd.
- *
- * Return value:
- * Either NULL or a pointer to the SCSI command the caller became owner of.
- */
-static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
-				       struct srp_request *req,
-				       struct scsi_cmnd *scmnd)
+static void srp_remove_req(struct srp_target_port *target,
+			   struct srp_request *req, s32 req_lim_delta)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&target->lock, flags);
-	if (!scmnd) {
-		scmnd = req->scmnd;
-		req->scmnd = NULL;
-	} else if (req->scmnd == scmnd) {
-		req->scmnd = NULL;
-	} else {
-		scmnd = NULL;
-	}
-	spin_unlock_irqrestore(&target->lock, flags);
-
-	return scmnd;
-}
-
-/**
- * srp_free_req() - Unmap data and add request to the free request list.
- */
-static void srp_free_req(struct srp_target_port *target,
-			 struct srp_request *req, struct scsi_cmnd *scmnd,
-			 s32 req_lim_delta)
-{
-	unsigned long flags;
-
-	srp_unmap_data(scmnd, target, req);
-
+	srp_unmap_data(req->scmnd, target, req);
 	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_lim_delta;
+	req->scmnd = NULL;
 	list_add_tail(&req->list, &target->free_reqs);
 	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
-	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
-
-	if (scmnd) {
-		srp_free_req(target, req, scmnd, 0);
-		scmnd->result = DID_RESET << 16;
-		scmnd->scsi_done(scmnd);
-	}
+	req->scmnd->result = DID_RESET << 16;
+	req->scmnd->scsi_done(req->scmnd);
+	srp_remove_req(target, req, 0);
 }
 
 static int srp_reconnect_target(struct srp_target_port *target)
 {
-	struct Scsi_Host *shost = target->scsi_host;
+	struct ib_qp_attr qp_attr;
+	struct ib_wc wc;
 	int i, ret;
 
-	if (target->state != SRP_TARGET_LIVE)
+	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
 		return -EAGAIN;
 
-	scsi_target_block(&shost->shost_gendev);
-
 	srp_disconnect_target(target);
 	/*
 	 * Now get a new local CM ID so that we avoid confusing the
@@ -712,11 +604,21 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	 */
 	ret = srp_new_cm_id(target);
 	if (ret)
-		goto unblock;
+		goto err;
 
-	ret = srp_create_target_ib(target);
+	qp_attr.qp_state = IB_QPS_RESET;
+	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
+	if (ret)
+		goto err;
+
+	ret = srp_init_qp(target, target->qp);
 	if (ret)
-		goto unblock;
+		goto err;
+
+	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
+		; /* nothing */
+	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
+		; /* nothing */
 
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		struct srp_request *req = &target->req_ring[i];
@@ -728,16 +630,13 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	for (i = 0; i < SRP_SQ_SIZE; ++i)
 		list_add(&target->tx_ring[i]->list, &target->free_tx);
 
+	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
-
-unblock:
-	scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
-			    SDEV_TRANSPORT_OFFLINE);
-
 	if (ret)
 		goto err;
 
-	shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");
+	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
+		ret = -EAGAIN;
 
 	return ret;
 
@@ -750,8 +649,17 @@ err:
 	 * However, we have to defer the real removal because we
 	 * are in the context of the SCSI error handler now, which
 	 * will deadlock if we call scsi_remove_host().
+	 *
+	 * Schedule our work inside the lock to avoid a race with
+	 * the flush_scheduled_work() in srp_remove_one().
 	 */
-	srp_queue_remove_work(target);
+	spin_lock_irq(&target->lock);
+	if (target->state == SRP_TARGET_CONNECTING) {
+		target->state = SRP_TARGET_DEAD;
+		INIT_WORK(&target->work, srp_remove_work);
+		queue_work(ib_wq, &target->work);
+	}
+	spin_unlock_irq(&target->lock);
 
 	return ret;
 }
@@ -1147,18 +1055,11 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		complete(&target->tsk_mgmt_done);
 	} else {
 		req = &target->req_ring[rsp->tag];
-		scmnd = srp_claim_req(target, req, NULL);
-		if (!scmnd) {
+		scmnd = req->scmnd;
+		if (!scmnd)
 			shost_printk(KERN_ERR, target->scsi_host,
 				     "Null scmnd for RSP w/tag %016llx\n",
 				     (unsigned long long) rsp->tag);
-
-			spin_lock_irqsave(&target->lock, flags);
-			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
-			spin_unlock_irqrestore(&target->lock, flags);
-
-			return;
-		}
 		scmnd->result = rsp->status;
 
 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
@@ -1173,9 +1074,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-		srp_free_req(target, req, scmnd,
-			     be32_to_cpu(rsp->req_lim_delta));
-
+		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
 		scmnd->host_scribble = NULL;
 		scmnd->scsi_done(scmnd);
 	}
@@ -1298,19 +1197,6 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 			     PFX "Recv failed with error code %d\n", res);
 }
 
-static void srp_handle_qp_err(enum ib_wc_status wc_status,
-			      enum ib_wc_opcode wc_opcode,
-			      struct srp_target_port *target)
-{
-	if (target->connected && !target->qp_in_error) {
-		shost_printk(KERN_ERR, target->scsi_host,
-			     PFX "failed %s status %d\n",
-			     wc_opcode & IB_WC_RECV ? "receive" : "send",
-			     wc_status);
-	}
-	target->qp_in_error = true;
-}
-
 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
 {
 	struct srp_target_port *target = target_ptr;
@@ -1318,11 +1204,15 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
 
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		if (likely(wc.status == IB_WC_SUCCESS)) {
-			srp_handle_recv(target, &wc);
-		} else {
-			srp_handle_qp_err(wc.status, wc.opcode, target);
+		if (wc.status) {
+			shost_printk(KERN_ERR, target->scsi_host,
+				     PFX "failed receive status %d\n",
+				     wc.status);
+			target->qp_in_error = 1;
+			break;
 		}
+
+		srp_handle_recv(target, &wc);
 	}
 }
 
@@ -1333,12 +1223,16 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 	struct srp_iu *iu;
 
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		if (likely(wc.status == IB_WC_SUCCESS)) {
-			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
-			list_add(&iu->list, &target->free_tx);
-		} else {
-			srp_handle_qp_err(wc.status, wc.opcode, target);
+		if (wc.status) {
+			shost_printk(KERN_ERR, target->scsi_host,
+				     PFX "failed send status %d\n",
+				     wc.status);
+			target->qp_in_error = 1;
+			break;
 		}
+
+		iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
+		list_add(&iu->list, &target->free_tx);
 	}
 }
 
@@ -1352,6 +1246,16 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	unsigned long flags;
 	int len;
 
+	if (target->state == SRP_TARGET_CONNECTING)
+		goto err;
+
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED) {
+		scmnd->result = DID_BAD_TARGET << 16;
+		scmnd->scsi_done(scmnd);
+		return 0;
+	}
+
 	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
 	if (!iu)
@@ -1408,6 +1312,7 @@ err_iu:
 err_unlock:
 	spin_unlock_irqrestore(&target->lock, flags);
 
+err:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
@@ -1449,33 +1354,6 @@ err:
 	return -ENOMEM;
 }
 
-static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
-{
-	uint64_t T_tr_ns, max_compl_time_ms;
-	uint32_t rq_tmo_jiffies;
-
-	/*
-	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
-	 * table 91), both the QP timeout and the retry count have to be set
-	 * for RC QP's during the RTR to RTS transition.
-	 */
-	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
-		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
-
-	/*
-	 * Set target->rq_tmo_jiffies to one second more than the largest time
-	 * it can take before an error completion is generated. See also
-	 * C9-140..142 in the IBTA spec for more information about how to
-	 * convert the QP Local ACK Timeout value to nanoseconds.
-	 */
-	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
-	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
-	do_div(max_compl_time_ms, NSEC_PER_MSEC);
-	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
-
-	return rq_tmo_jiffies;
-}
-
 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
 			       struct srp_login_rsp *lrsp,
 			       struct srp_target_port *target)
@@ -1535,8 +1413,6 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
 	if (ret)
 		goto error_free;
 
-	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
-
 	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
 	if (ret)
 		goto error_free;
@@ -1658,7 +1534,6 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 	case IB_CM_DREQ_RECEIVED:
 		shost_printk(KERN_WARNING, target->scsi_host,
 			     PFX "DREQ received - connection closed\n");
-		srp_change_conn_state(target, false);
 		if (ib_send_cm_drep(cm_id, NULL, 0))
 			shost_printk(KERN_ERR, target->scsi_host,
 				     PFX "Sending CM DREP failed\n");
@@ -1668,6 +1543,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "connection closed\n");
 
+		comp = 1;
 		target->status = 0;
 		break;
 
@@ -1695,6 +1571,10 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
 
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -1;
+
 	init_completion(&target->tsk_mgmt_done);
 
 	spin_lock_irq(&target->lock);
@@ -1733,18 +1613,25 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
+	int ret = SUCCESS;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
+	if (!req || target->qp_in_error)
+		return FAILED;
+	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			      SRP_TSK_ABORT_TASK))
 		return FAILED;
-	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
-			  SRP_TSK_ABORT_TASK);
-	srp_free_req(target, req, scmnd, 0);
-	scmnd->result = DID_ABORT << 16;
-	scmnd->scsi_done(scmnd);
 
-	return SUCCESS;
+	if (req->scmnd) {
+		if (!target->tsk_mgmt_status) {
+			srp_remove_req(target, req, 0);
+			scmnd->result = DID_ABORT << 16;
+		} else
+			ret = FAILED;
+	}
+
+	return ret;
 }
 
 static int srp_reset_device(struct scsi_cmnd *scmnd)
@@ -1784,26 +1671,15 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
 	return ret;
 }
 
-static int srp_slave_configure(struct scsi_device *sdev)
-{
-	struct Scsi_Host *shost = sdev->host;
-	struct srp_target_port *target = host_to_target(shost);
-	struct request_queue *q = sdev->request_queue;
-	unsigned long timeout;
-
-	if (sdev->type == TYPE_DISK) {
-		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
-		blk_queue_rq_timeout(q, timeout);
-	}
-
-	return 0;
-}
-
 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -ENODEV;
+
 	return sprintf(buf, "0x%016llx\n",
 		       (unsigned long long) be64_to_cpu(target->id_ext));
 }
@@ -1813,6 +1689,10 @@ static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
 {
 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -ENODEV;
+
 	return sprintf(buf, "0x%016llx\n",
 		       (unsigned long long) be64_to_cpu(target->ioc_guid));
 }
@@ -1822,6 +1702,10 @@ static ssize_t show_service_id(struct device *dev,
 {
 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -ENODEV;
+
 	return sprintf(buf, "0x%016llx\n",
 		       (unsigned long long) be64_to_cpu(target->service_id));
 }
@@ -1831,6 +1715,10 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
 {
 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -ENODEV;
+
 	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
 }
 
@@ -1839,6 +1727,10 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
 {
 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -ENODEV;
+
 	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
 }
 
@@ -1847,6 +1739,10 @@ static ssize_t show_orig_dgid(struct device *dev,
 {
 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -ENODEV;
+
 	return sprintf(buf, "%pI6\n", target->orig_dgid);
 }
 
@@ -1855,6 +1751,10 @@ static ssize_t show_req_lim(struct device *dev,
 {
 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -ENODEV;
+
 	return sprintf(buf, "%d\n", target->req_lim);
 }
 
@@ -1863,6 +1763,10 @@ static ssize_t show_zero_req_lim(struct device *dev,
 {
 	struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
+	if (target->state == SRP_TARGET_DEAD ||
+	    target->state == SRP_TARGET_REMOVED)
+		return -ENODEV;
+
 	return sprintf(buf, "%d\n", target->zero_req_lim);
 }
 
@@ -1931,7 +1835,6 @@ static struct scsi_host_template srp_template = {
 	.module = THIS_MODULE,
 	.name = "InfiniBand SRP initiator",
 	.proc_name = DRV_NAME,
-	.slave_configure = srp_slave_configure,
 	.info = srp_target_info,
 	.queuecommand = srp_queuecommand,
 	.eh_abort_handler = srp_abort,
@@ -1965,14 +1868,11 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
 		return PTR_ERR(rport);
 	}
 
-	rport->lld_data = target;
-
 	spin_lock(&host->target_lock);
 	list_add_tail(&target->list, &host->target_list);
 	spin_unlock(&host->target_lock);
 
 	target->state = SRP_TARGET_LIVE;
-	target->connected = false;
 
 	scsi_scan_target(&target->scsi_host->shost_gendev,
 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
@@ -2089,7 +1989,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 				goto out;
 			}
 			if (strlen(p) != 32) {
-				pr_warn("bad dest GID parameter '%s'\n", p);
+				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
 				kfree(p);
 				goto out;
 			}
@@ -2104,7 +2004,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
 		case SRP_OPT_PKEY:
 			if (match_hex(args, &token)) {
-				pr_warn("bad P_Key parameter '%s'\n", p);
+				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
 				goto out;
 			}
 			target->path.pkey = cpu_to_be16(token);
@@ -2123,7 +2023,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
 		case SRP_OPT_MAX_SECT:
 			if (match_int(args, &token)) {
-				pr_warn("bad max sect parameter '%s'\n", p);
+				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
 				goto out;
 			}
 			target->scsi_host->max_sectors = token;
@@ -2131,8 +2031,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
 		case SRP_OPT_MAX_CMD_PER_LUN:
 			if (match_int(args, &token)) {
-				pr_warn("bad max cmd_per_lun parameter '%s'\n",
-					p);
+				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
 				goto out;
 			}
 			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
@@ -2140,14 +2039,14 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
 		case SRP_OPT_IO_CLASS:
 			if (match_hex(args, &token)) {
-				pr_warn("bad IO class parameter '%s'\n", p);
+				printk(KERN_WARNING PFX "bad IO class parameter '%s' \n", p);
 				goto out;
 			}
 			if (token != SRP_REV10_IB_IO_CLASS &&
 			    token != SRP_REV16A_IB_IO_CLASS) {
-				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
-					token, SRP_REV10_IB_IO_CLASS,
-					SRP_REV16A_IB_IO_CLASS);
+				printk(KERN_WARNING PFX "unknown IO class parameter value"
+				       " %x specified (use %x or %x).\n",
+				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
 				goto out;
 			}
 			target->io_class = token;
@@ -2165,8 +2064,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
 		case SRP_OPT_CMD_SG_ENTRIES:
 			if (match_int(args, &token) || token < 1 || token > 255) {
-				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
-					p);
+				printk(KERN_WARNING PFX "bad max cmd_sg_entries parameter '%s'\n", p);
 				goto out;
 			}
 			target->cmd_sg_cnt = token;
@@ -2174,7 +2072,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
 		case SRP_OPT_ALLOW_EXT_SG:
 			if (match_int(args, &token)) {
-				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
+				printk(KERN_WARNING PFX "bad allow_ext_sg parameter '%s'\n", p);
 				goto out;
 			}
 			target->allow_ext_sg = !!token;
@@ -2183,16 +2081,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 		case SRP_OPT_SG_TABLESIZE:
 			if (match_int(args, &token) || token < 1 ||
 			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
-				pr_warn("bad max sg_tablesize parameter '%s'\n",
-					p);
+				printk(KERN_WARNING PFX "bad max sg_tablesize parameter '%s'\n", p);
 				goto out;
 			}
 			target->sg_tablesize = token;
 			break;
 
 		default:
-			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
-				p);
+			printk(KERN_WARNING PFX "unknown parameter or missing value "
+			       "'%s' in target creation request\n", p);
 			goto out;
 		}
 	}
@@ -2203,8 +2100,9 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 	for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
 		if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
 		    !(srp_opt_tokens[i].token & opt_mask))
-			pr_warn("target creation request is missing parameter '%s'\n",
-				srp_opt_tokens[i].pattern);
+			printk(KERN_WARNING PFX "target creation request is "
+			       "missing parameter '%s'\n",
+			       srp_opt_tokens[i].pattern);
 
 out:
 	kfree(options);
@@ -2251,7 +2149,7 @@ static ssize_t srp_create_target(struct device *dev,
 
 	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
 	    target->cmd_sg_cnt < target->sg_tablesize) {
-		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
+		printk(KERN_WARNING PFX "No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
 		target->sg_tablesize = target->cmd_sg_cnt;
 	}
 
@@ -2262,7 +2160,6 @@ static ssize_t srp_create_target(struct device *dev,
 			     sizeof (struct srp_indirect_buf) +
 			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
 
-	INIT_WORK(&target->remove_work, srp_remove_work);
 	spin_lock_init(&target->lock);
 	INIT_LIST_HEAD(&target->free_tx);
 	INIT_LIST_HEAD(&target->free_reqs);
@@ -2307,6 +2204,7 @@ static ssize_t srp_create_target(struct device *dev,
 	if (ret)
 		goto err_free_ib;
 
+	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
 	if (ret) {
 		shost_printk(KERN_ERR, target->scsi_host,
@@ -2411,7 +2309,8 @@ static void srp_add_one(struct ib_device *device)
 		return;
 
 	if (ib_query_device(device, dev_attr)) {
-		pr_warn("Query device failed for %s\n", device->name);
+		printk(KERN_WARNING PFX "Query device failed for %s\n",
+		       device->name);
 		goto free_attr;
 	}
 
@@ -2496,7 +2395,8 @@ static void srp_remove_one(struct ib_device *device)
 {
 	struct srp_device *srp_dev;
 	struct srp_host *host, *tmp_host;
-	struct srp_target_port *target;
+	LIST_HEAD(target_list);
+	struct srp_target_port *target, *tmp_target;
 
 	srp_dev = ib_get_client_data(device, &srp_client);
 
@@ -2509,17 +2409,34 @@ static void srp_remove_one(struct ib_device *device)
 		wait_for_completion(&host->released);
 
 		/*
-		 * Remove all target ports.
+		 * Mark all target ports as removed, so we stop queueing
+		 * commands and don't try to reconnect.
 		 */
 		spin_lock(&host->target_lock);
-		list_for_each_entry(target, &host->target_list, list)
-			srp_queue_remove_work(target);
+		list_for_each_entry(target, &host->target_list, list) {
+			spin_lock_irq(&target->lock);
+			target->state = SRP_TARGET_REMOVED;
+			spin_unlock_irq(&target->lock);
+		}
 		spin_unlock(&host->target_lock);
 
 		/*
-		 * Wait for target port removal tasks.
+		 * Wait for any reconnection tasks that may have
+		 * started before we marked our target ports as
+		 * removed, and any target port removal tasks.
 		 */
-		flush_workqueue(system_long_wq);
+		flush_workqueue(ib_wq);
+
+		list_for_each_entry_safe(target, tmp_target,
+					 &host->target_list, list) {
+			srp_remove_host(target->scsi_host);
+			scsi_remove_host(target->scsi_host);
+			srp_disconnect_target(target);
+			ib_destroy_cm_id(target->cm_id);
+			srp_free_target_ib(target);
+			srp_free_req_data(target);
+			scsi_host_put(target->scsi_host);
+		}
 
 		kfree(host);
 	}
@@ -2533,7 +2450,6 @@ static void srp_remove_one(struct ib_device *device)
 }
 
 static struct srp_function_template ib_srp_transport_functions = {
-	.rport_delete = srp_rport_delete,
 };
 
 static int __init srp_init_module(void)
@@ -2543,7 +2459,7 @@ static int __init srp_init_module(void)
 	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
 
 	if (srp_sg_tablesize) {
-		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
+		printk(KERN_WARNING PFX "srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
 		if (!cmd_sg_entries)
 			cmd_sg_entries = srp_sg_tablesize;
 	}
@@ -2552,15 +2468,14 @@ static int __init srp_init_module(void)
 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
 
 	if (cmd_sg_entries > 255) {
-		pr_warn("Clamping cmd_sg_entries to 255\n");
+		printk(KERN_WARNING PFX "Clamping cmd_sg_entries to 255\n");
 		cmd_sg_entries = 255;
 	}
 
 	if (!indirect_sg_entries)
 		indirect_sg_entries = cmd_sg_entries;
 	else if (indirect_sg_entries < cmd_sg_entries) {
-		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
-			cmd_sg_entries);
+		printk(KERN_WARNING PFX "Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", cmd_sg_entries);
 		indirect_sg_entries = cmd_sg_entries;
 	}
 
@@ -2571,7 +2486,7 @@ static int __init srp_init_module(void)
 
 	ret = class_register(&srp_class);
 	if (ret) {
-		pr_err("couldn't register class infiniband_srp\n");
+		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
 		srp_release_transport(ib_srp_transport_template);
 		return ret;
 	}
@@ -2580,7 +2495,7 @@ static int __init srp_init_module(void)
 
 	ret = ib_register_client(&srp_client);
 	if (ret) {
-		pr_err("couldn't register IB client\n");
+		printk(KERN_ERR PFX "couldn't register IB client\n");
 		srp_release_transport(ib_srp_transport_template);
 		ib_sa_unregister_client(&srp_sa_client);
 		class_unregister(&srp_class);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index de2d0b3c0bf..020caf0c378 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -80,7 +80,9 @@ enum {
 
 enum srp_target_state {
 	SRP_TARGET_LIVE,
-	SRP_TARGET_REMOVED,
+	SRP_TARGET_CONNECTING,
+	SRP_TARGET_DEAD,
+	SRP_TARGET_REMOVED
 };
 
 enum srp_iu_type {
@@ -161,9 +163,6 @@ struct srp_target_port {
 	struct ib_sa_query *path_query;
 	int path_query_id;
 
-	u32 rq_tmo_jiffies;
-	bool connected;
-
 	struct ib_cm_id *cm_id;
 
 	int max_ti_iu_len;
@@ -174,12 +173,12 @@ struct srp_target_port {
 	struct srp_iu *rx_ring[SRP_RQ_SIZE];
 	struct srp_request req_ring[SRP_CMD_SQ_SIZE];
 
-	struct work_struct remove_work;
+	struct work_struct work;
 
 	struct list_head list;
 	struct completion done;
 	int status;
-	bool qp_in_error;
+	int qp_in_error;
 
 	struct completion tsk_mgmt_done;
 	u8 tsk_mgmt_status;