diff options
author | Bart Van Assche <bvanassche@acm.org> | 2010-11-26 14:50:09 -0500 |
---|---|---|
committer | David Dillow <dillowda@ornl.gov> | 2011-01-10 15:44:50 -0500 |
commit | 94a9174c630c8465ed9e97ecd242993429930c05 (patch) | |
tree | 73ddbe0024d54bad543c90144576148b70922eea /drivers/infiniband | |
parent | 76c75b258f1fe6abac6af2356989ad4d6518886e (diff) |
IB/srp: reduce lock coverage of command completion
We only need the lock to cover list and credit manipulations, so push
those into srp_remove_req() and update the call chains.
We reorder the request removal and command completion in
srp_process_rsp() to avoid the SCSI mid-layer sending another command
before we've released our request and added any credits returned by the
target. This prevents us from returning HOST_BUSY unnecessarily.
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
[ broken out, small cleanups, and modified to avoid potential extraneous
HOST_BUSY returns by David Dillow ]
Signed-off-by: David Dillow <dillowda@ornl.gov>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/ulp/srp/ib_srp.c | 37 |
1 file changed, 14 insertions, 23 deletions
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index e5bd181dbce5..e76fe54faeea 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -549,18 +549,24 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, | |||
549 | scsi_sg_count(scmnd), scmnd->sc_data_direction); | 549 | scsi_sg_count(scmnd), scmnd->sc_data_direction); |
550 | } | 550 | } |
551 | 551 | ||
552 | static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) | 552 | static void srp_remove_req(struct srp_target_port *target, |
553 | struct srp_request *req, s32 req_lim_delta) | ||
553 | { | 554 | { |
555 | unsigned long flags; | ||
556 | |||
554 | srp_unmap_data(req->scmnd, target, req); | 557 | srp_unmap_data(req->scmnd, target, req); |
558 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | ||
559 | target->req_lim += req_lim_delta; | ||
555 | req->scmnd = NULL; | 560 | req->scmnd = NULL; |
556 | list_add_tail(&req->list, &target->free_reqs); | 561 | list_add_tail(&req->list, &target->free_reqs); |
562 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | ||
557 | } | 563 | } |
558 | 564 | ||
559 | static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) | 565 | static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) |
560 | { | 566 | { |
561 | req->scmnd->result = DID_RESET << 16; | 567 | req->scmnd->result = DID_RESET << 16; |
562 | req->scmnd->scsi_done(req->scmnd); | 568 | req->scmnd->scsi_done(req->scmnd); |
563 | srp_remove_req(target, req); | 569 | srp_remove_req(target, req, 0); |
564 | } | 570 | } |
565 | 571 | ||
566 | static int srp_reconnect_target(struct srp_target_port *target) | 572 | static int srp_reconnect_target(struct srp_target_port *target) |
@@ -595,13 +601,11 @@ static int srp_reconnect_target(struct srp_target_port *target) | |||
595 | while (ib_poll_cq(target->send_cq, 1, &wc) > 0) | 601 | while (ib_poll_cq(target->send_cq, 1, &wc) > 0) |
596 | ; /* nothing */ | 602 | ; /* nothing */ |
597 | 603 | ||
598 | spin_lock_irq(target->scsi_host->host_lock); | ||
599 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { | 604 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { |
600 | struct srp_request *req = &target->req_ring[i]; | 605 | struct srp_request *req = &target->req_ring[i]; |
601 | if (req->scmnd) | 606 | if (req->scmnd) |
602 | srp_reset_req(target, req); | 607 | srp_reset_req(target, req); |
603 | } | 608 | } |
604 | spin_unlock_irq(target->scsi_host->host_lock); | ||
605 | 609 | ||
606 | INIT_LIST_HEAD(&target->free_tx); | 610 | INIT_LIST_HEAD(&target->free_tx); |
607 | for (i = 0; i < SRP_SQ_SIZE; ++i) | 611 | for (i = 0; i < SRP_SQ_SIZE; ++i) |
@@ -914,15 +918,12 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | |||
914 | struct srp_request *req; | 918 | struct srp_request *req; |
915 | struct scsi_cmnd *scmnd; | 919 | struct scsi_cmnd *scmnd; |
916 | unsigned long flags; | 920 | unsigned long flags; |
917 | s32 delta; | ||
918 | |||
919 | delta = (s32) be32_to_cpu(rsp->req_lim_delta); | ||
920 | |||
921 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | ||
922 | |||
923 | target->req_lim += delta; | ||
924 | 921 | ||
925 | if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { | 922 | if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { |
923 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | ||
924 | target->req_lim += be32_to_cpu(rsp->req_lim_delta); | ||
925 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | ||
926 | |||
926 | target->tsk_mgmt_status = -1; | 927 | target->tsk_mgmt_status = -1; |
927 | if (be32_to_cpu(rsp->resp_data_len) >= 4) | 928 | if (be32_to_cpu(rsp->resp_data_len) >= 4) |
928 | target->tsk_mgmt_status = rsp->data[3]; | 929 | target->tsk_mgmt_status = rsp->data[3]; |
@@ -948,12 +949,10 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | |||
948 | else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) | 949 | else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) |
949 | scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); | 950 | scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); |
950 | 951 | ||
952 | srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta)); | ||
951 | scmnd->host_scribble = NULL; | 953 | scmnd->host_scribble = NULL; |
952 | scmnd->scsi_done(scmnd); | 954 | scmnd->scsi_done(scmnd); |
953 | srp_remove_req(target, req); | ||
954 | } | 955 | } |
955 | |||
956 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | ||
957 | } | 956 | } |
958 | 957 | ||
959 | static int srp_response_common(struct srp_target_port *target, s32 req_delta, | 958 | static int srp_response_common(struct srp_target_port *target, s32 req_delta, |
@@ -1498,18 +1497,14 @@ static int srp_abort(struct scsi_cmnd *scmnd) | |||
1498 | SRP_TSK_ABORT_TASK)) | 1497 | SRP_TSK_ABORT_TASK)) |
1499 | return FAILED; | 1498 | return FAILED; |
1500 | 1499 | ||
1501 | spin_lock_irq(target->scsi_host->host_lock); | ||
1502 | |||
1503 | if (req->scmnd) { | 1500 | if (req->scmnd) { |
1504 | if (!target->tsk_mgmt_status) { | 1501 | if (!target->tsk_mgmt_status) { |
1505 | srp_remove_req(target, req); | 1502 | srp_remove_req(target, req, 0); |
1506 | scmnd->result = DID_ABORT << 16; | 1503 | scmnd->result = DID_ABORT << 16; |
1507 | } else | 1504 | } else |
1508 | ret = FAILED; | 1505 | ret = FAILED; |
1509 | } | 1506 | } |
1510 | 1507 | ||
1511 | spin_unlock_irq(target->scsi_host->host_lock); | ||
1512 | |||
1513 | return ret; | 1508 | return ret; |
1514 | } | 1509 | } |
1515 | 1510 | ||
@@ -1528,16 +1523,12 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
1528 | if (target->tsk_mgmt_status) | 1523 | if (target->tsk_mgmt_status) |
1529 | return FAILED; | 1524 | return FAILED; |
1530 | 1525 | ||
1531 | spin_lock_irq(target->scsi_host->host_lock); | ||
1532 | |||
1533 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { | 1526 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { |
1534 | struct srp_request *req = &target->req_ring[i]; | 1527 | struct srp_request *req = &target->req_ring[i]; |
1535 | if (req->scmnd && req->scmnd->device == scmnd->device) | 1528 | if (req->scmnd && req->scmnd->device == scmnd->device) |
1536 | srp_reset_req(target, req); | 1529 | srp_reset_req(target, req); |
1537 | } | 1530 | } |
1538 | 1531 | ||
1539 | spin_unlock_irq(target->scsi_host->host_lock); | ||
1540 | |||
1541 | return SUCCESS; | 1532 | return SUCCESS; |
1542 | } | 1533 | } |
1543 | 1534 | ||