author		Brian King <brking@linux.vnet.ibm.com>	2012-07-17 09:13:52 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2012-08-24 05:10:27 -0400
commit		00bfef2cc1c6b5b3f1baa8f56f5f9a2b10ed1a52 (patch)
tree		d1a74b07882271cf1542121d4d01da34723c78d6 /drivers/scsi/ipr.c
parent		3013d91831a04ec007431105e647298e8cf91611 (diff)
[SCSI] ipr: Reduce queuecommand lock time
Reduce the amount of time the host lock is held in queuecommand
for improved performance.

[jejb: fix up checkpatch noise]

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--	drivers/scsi/ipr.c	90
1 file changed, 63 insertions(+), 27 deletions(-)
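The change the message describes follows a simple pattern: take the host lock only long enough to pull a command block off the free list, drop it for command initialization and DMA mapping, then retake it to queue and issue the command, re-checking adapter state because it can change while the lock is dropped. Below is a minimal userspace sketch of that allocate-under-lock / build-unlocked / queue-under-lock idea, with a pthread mutex standing in for the host spinlock; every identifier in it (cmd_pool, cmd_block, queue_command, and so on) is invented for illustration and is not taken from ipr.c.

/*
 * Illustrative only: the lock is taken once to allocate and once to queue;
 * the command is built in between with the lock dropped.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct cmd_block {
	struct cmd_block *next;
	char cdb[16];		/* stand-in for the real command payload */
};

struct cmd_pool {
	pthread_mutex_t lock;	/* plays the role of shost->host_lock */
	struct cmd_block *free_list;
	struct cmd_block *pending_list;
};

/* Caller holds pool->lock: just unlink a free block, no initialization here. */
static struct cmd_block *__get_free_cmd(struct cmd_pool *pool)
{
	struct cmd_block *c = pool->free_list;

	if (c)
		pool->free_list = c->next;
	return c;
}

static int queue_command(struct cmd_pool *pool, const char *cdb, size_t len)
{
	struct cmd_block *c;

	/* Lock held only long enough to grab a free block. */
	pthread_mutex_lock(&pool->lock);
	c = __get_free_cmd(pool);
	pthread_mutex_unlock(&pool->lock);
	if (!c)
		return -1;	/* analogous to SCSI_MLQUEUE_HOST_BUSY */

	/* Expensive setup runs unlocked. */
	memset(c->cdb, 0, sizeof(c->cdb));
	memcpy(c->cdb, cdb, len < sizeof(c->cdb) ? len : sizeof(c->cdb));

	/* Retake the lock just to put the built command on the pending list. */
	pthread_mutex_lock(&pool->lock);
	c->next = pool->pending_list;
	pool->pending_list = c;
	pthread_mutex_unlock(&pool->lock);
	return 0;
}

int main(void)
{
	struct cmd_block blocks[2] = { { &blocks[1] }, { NULL } };
	struct cmd_pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.free_list = &blocks[0],
	};

	if (!queue_command(&pool, "\x12\x00\x00\x00\x24\x00", 6))
		printf("queued one command\n");
	return 0;
}

The real patch additionally re-checks allow_cmds and ioa_is_dead after retaking the lock and returns the block to the free list if the adapter was reset or went offline during the unlocked setup; the sketch leaves that race handling out for brevity.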
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dacc784ad2d6..b2994e2cf017 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -620,25 +620,39 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
- * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
  * @ioa_cfg:	ioa config struct
  *
  * Return value:
  *	pointer to ipr command struct
  **/
 static
-struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct ipr_cmnd *ipr_cmd;
 
 	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
 	list_del(&ipr_cmd->queue);
-	ipr_init_ipr_cmnd(ipr_cmd);
 
 	return ipr_cmd;
 }
 
 /**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
+ * @ioa_cfg:	ioa config struct
+ *
+ * Return value:
+ *	pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+	struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+	ipr_init_ipr_cmnd(ipr_cmd);
+	return ipr_cmd;
+}
+
+/**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:	ioa config struct
  * @clr_ints:     interrupts to clear
@@ -5783,8 +5797,8 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 
 /**
  * ipr_queuecommand - Queue a mid-layer request
+ * @shost:		scsi host struct
  * @scsi_cmd:	scsi command struct
- * @done:		done function
  *
  * This function queues a request generated by the mid-layer.
  *
@@ -5793,61 +5807,58 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
  **/
-static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
-				void (*done) (struct scsi_cmnd *))
+static int ipr_queuecommand(struct Scsi_Host *shost,
+			    struct scsi_cmnd *scsi_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg;
 	struct ipr_resource_entry *res;
 	struct ipr_ioarcb *ioarcb;
 	struct ipr_cmnd *ipr_cmd;
+	unsigned long lock_flags;
 	int rc = 0;
 
-	scsi_cmd->scsi_done = done;
-	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-	res = scsi_cmd->device->hostdata;
+	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+
+	spin_lock_irqsave(shost->host_lock, lock_flags);
 	scsi_cmd->result = (DID_OK << 16);
+	res = scsi_cmd->device->hostdata;
 
 	/*
 	 * We are currently blocking all devices due to a host reset
 	 * We have told the host to stop giving us new requests, but
 	 * ERP ops don't count. FIXME
 	 */
-	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
+	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
+	}
 
 	/*
 	 * FIXME - Create scsi_set_host_offline interface
 	 *  and the ioa_is_dead check can be removed
 	 */
 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
-		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-		scsi_cmd->result = (DID_NO_CONNECT << 16);
-		scsi_cmd->scsi_done(scsi_cmd);
-		return 0;
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		goto err_nodev;
 	}
 
 	if (ipr_is_gata(res) && res->sata_port)
 		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
 
-	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+	ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+	ipr_init_ipr_cmnd(ipr_cmd);
 	ioarcb = &ipr_cmd->ioarcb;
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
 
 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
 	ipr_cmd->scsi_cmd = scsi_cmd;
-	ioarcb->res_handle = res->res_handle;
 	ipr_cmd->done = ipr_scsi_done;
-	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 
 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
 		if (scsi_cmd->underflow == 0)
 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 
-		if (res->needs_sync_complete) {
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
-			res->needs_sync_complete = 0;
-		}
-
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
 		if (ipr_is_gscsi(res))
 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
@@ -5866,16 +5877,41 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 	}
 
-	if (unlikely(rc != 0)) {
-		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
+		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		if (!rc)
+			scsi_dma_unmap(scsi_cmd);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
+	if (unlikely(ioa_cfg->ioa_is_dead)) {
+		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		scsi_dma_unmap(scsi_cmd);
+		goto err_nodev;
+	}
+
+	ioarcb->res_handle = res->res_handle;
+	if (res->needs_sync_complete) {
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+		res->needs_sync_complete = 0;
+	}
+	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 	ipr_send_command(ipr_cmd);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 	return 0;
-}
 
-static DEF_SCSI_QCMD(ipr_queuecommand)
+err_nodev:
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+	scsi_cmd->result = (DID_NO_CONNECT << 16);
+	scsi_cmd->scsi_done(scsi_cmd);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+	return 0;
+}
 
 /**
  * ipr_ioctl - IOCTL handler