path: root/drivers/scsi/ipr.c
author	Brian King <brking@linux.vnet.ibm.com>	2012-07-17 09:14:40 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2012-08-24 05:10:28 -0400
commit	172cd6e1877751f003691c2439a1369bfbf6afa5 (patch)
tree	138d0949aaf845ad4f449b16a41c60f5b22f56e0 /drivers/scsi/ipr.c
parent	00bfef2cc1c6b5b3f1baa8f56f5f9a2b10ed1a52 (diff)
[SCSI] ipr: Reduce interrupt lock time
Reduce the amount of time the host lock is held in the interrupt handler for improved performance.

[jejb: fix up checkpatch noise]
Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
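For context, the patch applies a common deferred-completion pattern: while the host lock is held, the interrupt handler only moves finished commands onto a private list, and the per-command completion callbacks run after the lock has been dropped. Below is a minimal kernel-style sketch of that pattern; the demo_* names are hypothetical illustrations and are not part of the driver.

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical command block: a list hook plus a completion callback. */
struct demo_cmd {
	struct list_head queue;
	void (*fast_done)(struct demo_cmd *cmd);	/* invoked after the lock is dropped */
};

/* Sketch of an interrupt handler using the short-critical-section pattern. */
static irqreturn_t demo_isr(spinlock_t *host_lock, struct list_head *pending)
{
	struct demo_cmd *cmd, *temp;
	unsigned long flags;
	LIST_HEAD(doneq);

	spin_lock_irqsave(host_lock, flags);
	/* Keep the lock only long enough to detach the completed commands. */
	list_for_each_entry_safe(cmd, temp, pending, queue)
		list_move_tail(&cmd->queue, &doneq);
	spin_unlock_irqrestore(host_lock, flags);

	/* The potentially expensive completion work runs with the lock released. */
	list_for_each_entry_safe(cmd, temp, &doneq, queue) {
		list_del(&cmd->queue);
		cmd->fast_done(cmd);
	}

	return IRQ_HANDLED;
}

In the actual patch, ipr_queuecommand installs ipr_scsi_done as the fast_done callback, which re-acquires the lock only around the short completion sections, while internal commands allocated via ipr_get_free_ipr_cmnd keep the old behaviour through ipr_lock_and_done, which takes the lock before calling ->done.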
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--	drivers/scsi/ipr.c	60
1 file changed, 46 insertions(+), 14 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b2994e2cf017..07b14ba6906a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -566,6 +566,23 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 #endif
 
 /**
+ * ipr_lock_and_done - Acquire lock and complete command
+ * @ipr_cmd:	ipr command struct
+ *
+ * Return value:
+ *	none
+ **/
+static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
+{
+	unsigned long lock_flags;
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	ipr_cmd->done(ipr_cmd);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
+/**
  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
  * @ipr_cmd:	ipr command struct
  *
@@ -611,11 +628,13 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
  * Return value:
  *	none
  **/
-static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
+static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
+			      void (*fast_done) (struct ipr_cmnd *))
 {
 	ipr_reinit_ipr_cmnd(ipr_cmd);
 	ipr_cmd->u.scratch = 0;
 	ipr_cmd->sibling = NULL;
+	ipr_cmd->fast_done = fast_done;
 	init_timer(&ipr_cmd->timer);
 }
 
@@ -648,7 +667,7 @@ static
 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
-	ipr_init_ipr_cmnd(ipr_cmd);
+	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
 	return ipr_cmd;
 }
 
@@ -5130,8 +5149,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	u16 cmd_index;
 	int num_hrrq = 0;
 	int irq_none = 0;
-	struct ipr_cmnd *ipr_cmd;
+	struct ipr_cmnd *ipr_cmd, *temp;
 	irqreturn_t rc = IRQ_NONE;
+	LIST_HEAD(doneq);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
@@ -5152,8 +5172,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
 		if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
 			ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
-			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-			return IRQ_HANDLED;
+			rc = IRQ_HANDLED;
+			goto unlock_out;
 		}
 
 		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
@@ -5162,9 +5182,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
 
-		list_del(&ipr_cmd->queue);
-		del_timer(&ipr_cmd->timer);
-		ipr_cmd->done(ipr_cmd);
+		list_move_tail(&ipr_cmd->queue, &doneq);
 
 		rc = IRQ_HANDLED;
 
@@ -5194,8 +5212,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
 			   int_reg & IPR_PCII_HRRQ_UPDATED) {
 			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
-			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-			return IRQ_HANDLED;
+			rc = IRQ_HANDLED;
+			goto unlock_out;
 		} else
 			break;
 	}
@@ -5203,7 +5221,14 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	if (unlikely(rc == IRQ_NONE))
 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
 
+unlock_out:
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+		list_del(&ipr_cmd->queue);
+		del_timer(&ipr_cmd->timer);
+		ipr_cmd->fast_done(ipr_cmd);
+	}
+
 	return rc;
 }
 
@@ -5784,15 +5809,22 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+	unsigned long lock_flags;
 
 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
-		scsi_dma_unmap(ipr_cmd->scsi_cmd);
+		scsi_dma_unmap(scsi_cmd);
+
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
-	} else
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	} else {
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 		ipr_erp_start(ioa_cfg, ipr_cmd);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	}
 }
 
 /**
@@ -5848,12 +5880,12 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 	ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 
-	ipr_init_ipr_cmnd(ipr_cmd);
+	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
 	ioarcb = &ipr_cmd->ioarcb;
 
 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
 	ipr_cmd->scsi_cmd = scsi_cmd;
-	ipr_cmd->done = ipr_scsi_done;
+	ipr_cmd->done = ipr_scsi_eh_done;
 
 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
 		if (scsi_cmd->underflow == 0)