author     David Dillow <dillowda@ornl.gov>   2010-11-26 13:02:21 -0500
committer  David Dillow <dillowda@ornl.gov>   2011-01-05 15:24:25 -0500
commit     f8b6e31e4e46bf514c27fce38783ed5615cca01d
tree       0ae9afcb65749e36034b6de03abe5fd51c5dc7b0 /drivers/infiniband/ulp
parent     3c0eee3fe6a3a1c745379547c7e7c904aa64f6d5
IB/srp: allow task management without a previous request
We can only have one task management command outstanding, so move the
completion and status to the target port. This allows us to handle resets
of a LUN without a corresponding request having been sent. Meanwhile, we
don't need to play games with host_scribble; just use it as the pointer
it is.

This fixes a crash when we issue a bus reset using sg_reset.

Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=13893
Reported-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: David Dillow <dillowda@ornl.gov>
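For orientation before the diff, here is a minimal, stand-alone user-space sketch of the pattern the patch moves to: a single task-management function outstanding per target, with the completion and status held on the target rather than on a request. Everything in the sketch (model_target, model_send_tsk_mgmt, model_process_tsk_mgmt_rsp, and the pthread condition variable standing in for the kernel completion) is invented for illustration; the real code is in the diff below.

/* model_tmf.c - user-space model of the new flow; build with: cc model_tmf.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct model_target {
	pthread_mutex_t lock;
	pthread_cond_t  tsk_mgmt_done;   /* stands in for the kernel completion */
	bool            tsk_mgmt_pending;
	int             tsk_mgmt_status; /* filled in by the response path */
};

/* Response path: record the status on the target and wake the waiter. */
static void model_process_tsk_mgmt_rsp(struct model_target *t, int status)
{
	pthread_mutex_lock(&t->lock);
	t->tsk_mgmt_status = status;
	t->tsk_mgmt_pending = false;
	pthread_cond_signal(&t->tsk_mgmt_done);
	pthread_mutex_unlock(&t->lock);
}

/* Issue path: only a tag and a LUN are needed, no request structure. */
static int model_send_tsk_mgmt(struct model_target *t,
			       unsigned long long req_tag, unsigned int lun)
{
	int status;

	pthread_mutex_lock(&t->lock);
	t->tsk_mgmt_pending = true;
	t->tsk_mgmt_status = -1;
	printf("TMF sent: tag %llu, lun %u\n", req_tag, lun);
	while (t->tsk_mgmt_pending)        /* the driver waits with a timeout here */
		pthread_cond_wait(&t->tsk_mgmt_done, &t->lock);
	status = t->tsk_mgmt_status;
	pthread_mutex_unlock(&t->lock);
	return status;
}

static void *responder(void *arg)
{
	sleep(1);                            /* pretend the target is working */
	model_process_tsk_mgmt_rsp(arg, 0);  /* 0 == task management succeeded */
	return NULL;
}

int main(void)
{
	struct model_target t = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.tsk_mgmt_done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t thr;

	pthread_create(&thr, NULL, responder, &t);
	/* a reset with no originating request: any tag works, the model ignores it */
	printf("TMF status: %d\n", model_send_tsk_mgmt(&t, ~0ULL, 0));
	pthread_join(&thr, NULL);
	return 0;
}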
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c   90
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h   10
2 files changed, 37 insertions(+), 63 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1e1e347a7715..29429a13fd90 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -542,6 +542,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
 {
 	srp_unmap_data(req->scmnd, target, req);
+	req->scmnd = NULL;
 	list_move_tail(&req->list, &target->free_reqs);
 }
 
@@ -925,15 +926,13 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 
 	target->req_lim += delta;
 
-	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
-
 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
-		if (be32_to_cpu(rsp->resp_data_len) < 4)
-			req->tsk_status = -1;
-		else
-			req->tsk_status = rsp->data[3];
-		complete(&req->done);
+		target->tsk_mgmt_status = -1;
+		if (be32_to_cpu(rsp->resp_data_len) >= 4)
+			target->tsk_mgmt_status = rsp->data[3];
+		complete(&target->tsk_mgmt_done);
 	} else {
+		req = &target->req_ring[rsp->tag];
 		scmnd = req->scmnd;
 		if (!scmnd)
 			shost_printk(KERN_ERR, target->scsi_host,
@@ -953,13 +952,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-		if (!req->tsk_mgmt) {
-			scmnd->host_scribble = (void *) -1L;
-			scmnd->scsi_done(scmnd);
-
-			srp_remove_req(target, req);
-		} else
-			req->cmd_done = 1;
+		scmnd->host_scribble = NULL;
+		scmnd->scsi_done(scmnd);
+		srp_remove_req(target, req);
 	}
 
 	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
@@ -1155,7 +1150,7 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 
 	scmnd->scsi_done = done;
 	scmnd->result = 0;
-	scmnd->host_scribble = (void *) (long) req->index;
+	scmnd->host_scribble = (void *) req;
 
 	cmd = iu->buf;
 	memset(cmd, 0, sizeof *cmd);
@@ -1167,8 +1162,6 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 
 	req->scmnd = scmnd;
 	req->cmd = iu;
-	req->cmd_done = 0;
-	req->tsk_mgmt = NULL;
 
 	len = srp_map_data(scmnd, target, req);
 	if (len < 0) {
@@ -1442,7 +1435,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 }
 
 static int srp_send_tsk_mgmt(struct srp_target_port *target,
-			     struct srp_request *req, u8 func)
+			     u64 req_tag, unsigned int lun, u8 func)
 {
 	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	struct srp_iu *iu;
@@ -1451,12 +1444,10 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	spin_lock_irq(target->scsi_host->host_lock);
 
 	if (target->state == SRP_TARGET_DEAD ||
-	    target->state == SRP_TARGET_REMOVED) {
-		req->scmnd->result = DID_BAD_TARGET << 16;
+	    target->state == SRP_TARGET_REMOVED)
 		goto out;
-	}
 
-	init_completion(&req->done);
+	init_completion(&target->tsk_mgmt_done);
 
 	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
 	if (!iu)
@@ -1468,21 +1459,19 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
 	tsk_mgmt->opcode = SRP_TSK_MGMT;
-	tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
-	tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
+	tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
+	tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
 	tsk_mgmt->tsk_mgmt_func = func;
-	tsk_mgmt->task_tag = req->index;
+	tsk_mgmt->task_tag = req_tag;
 
 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
 				      DMA_TO_DEVICE);
 	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
 		goto out;
 
-	req->tsk_mgmt = iu;
-
 	spin_unlock_irq(target->scsi_host->host_lock);
 
-	if (!wait_for_completion_timeout(&req->done,
+	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
 		return -1;
 
@@ -1493,43 +1482,29 @@ out:
 	return -1;
 }
 
-static int srp_find_req(struct srp_target_port *target,
-			struct scsi_cmnd *scmnd,
-			struct srp_request **req)
-{
-	if (scmnd->host_scribble == (void *) -1L)
-		return -1;
-
-	*req = &target->req_ring[(long) scmnd->host_scribble];
-
-	return 0;
-}
-
 static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req;
+	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
 	int ret = SUCCESS;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-	if (target->qp_in_error)
-		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
+	if (!req || target->qp_in_error)
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
+	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			      SRP_TSK_ABORT_TASK))
 		return FAILED;
 
 	spin_lock_irq(target->scsi_host->host_lock);
 
-	if (req->cmd_done) {
-		srp_remove_req(target, req);
-		scmnd->scsi_done(scmnd);
-	} else if (!req->tsk_status) {
-		srp_remove_req(target, req);
-		scmnd->result = DID_ABORT << 16;
-	} else
-		ret = FAILED;
+	if (req->scmnd) {
+		if (!target->tsk_mgmt_status) {
+			srp_remove_req(target, req);
+			scmnd->result = DID_ABORT << 16;
+		} else
+			ret = FAILED;
+	}
 
 	spin_unlock_irq(target->scsi_host->host_lock);
 
@@ -1545,17 +1520,16 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 
 	if (target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
-		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
+			      SRP_TSK_LUN_RESET))
 		return FAILED;
-	if (req->tsk_status)
+	if (target->tsk_mgmt_status)
 		return FAILED;
 
 	spin_lock_irq(target->scsi_host->host_lock);
 
 	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		if (req->scmnd->device == scmnd->device)
+		if (req->scmnd && req->scmnd->device == scmnd->device)
 			srp_reset_req(target, req);
 
 	spin_unlock_irq(target->scsi_host->host_lock);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ed0dce9e479f..f8b689a644b7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -68,7 +68,8 @@ enum {
 	SRP_TSK_MGMT_SQ_SIZE = 1,
 	SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
 
-	SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
+	SRP_TAG_NO_REQ = ~0U,
+	SRP_TAG_TSK_MGMT = 1U << 31,
 
 	SRP_FMR_SIZE = 256,
 	SRP_FMR_POOL_SIZE = 1024,
@@ -113,12 +114,8 @@ struct srp_request {
 	struct list_head list;
 	struct scsi_cmnd *scmnd;
 	struct srp_iu *cmd;
-	struct srp_iu *tsk_mgmt;
 	struct ib_pool_fmr *fmr;
-	struct completion done;
 	short index;
-	u8 cmd_done;
-	u8 tsk_status;
 };
 
 struct srp_target_port {
@@ -165,6 +162,9 @@ struct srp_target_port {
 	int status;
 	enum srp_target_state state;
 	int qp_in_error;
+
+	struct completion tsk_mgmt_done;
+	u8 tsk_mgmt_status;
 };
 
 struct srp_iu {