path: root/drivers/scsi/ipr.c
author	wenxiong@linux.vnet.ibm.com <wenxiong@linux.vnet.ibm.com>	2013-01-11 18:43:51 -0500
committer	James Bottomley <JBottomley@Parallels.com>	2013-01-29 18:48:52 -0500
commit	56d6aa33d3f68471466cb183d6e04b508dfb296f (patch)
tree	3c13bfb4312535390b613ef31044f1af9ff9a619 /drivers/scsi/ipr.c
parent	05a6538a9a204999e0c0f7faee00b81b334f4fc7 (diff)
[SCSI] ipr: Reduce lock contention
This patch reduces lock contention while implementing distributed completion processing.

Signed-off-by: Wen Xiong <wenxiong@linux.vnet.ibm.com>
Acked-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--	drivers/scsi/ipr.c	323
1 file changed, 228 insertions(+), 95 deletions(-)
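Before the diff itself, a minimal sketch (not the driver's actual code) of the locking pattern this patch applies: each host RRQ (completion queue) gets its own spinlock plus per-queue copies of the allow_cmds/allow_interrupts/ioa_is_dead flags, so an interrupt handler only serializes against its own queue instead of the single adapter-wide host lock. The names demo_hrrq and demo_isr below are illustrative assumptions, not identifiers from ipr.c; in the real driver hrrq[0] continues to alias the SCSI host lock.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/interrupt.h>

/* Hypothetical, simplified per-queue structure mirroring struct ipr_hrr_queue. */
struct demo_hrrq {
	spinlock_t _lock;		/* private lock for queues 1..N-1 */
	spinlock_t *lock;		/* queue 0 points this at the host lock */
	int allow_interrupts;		/* per-queue copy of the old global flag */
	struct list_head hrrq_pending_q;
};

/* Per-queue interrupt handler: locks only the queue that raised the IRQ. */
static irqreturn_t demo_isr(int irq, void *devp)
{
	struct demo_hrrq *hrrq = devp;
	unsigned long flags;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(hrrq->lock, flags);
	if (hrrq->allow_interrupts && !list_empty(&hrrq->hrrq_pending_q))
		rc = IRQ_HANDLED;	/* the real ISR would reap completions here */
	spin_unlock_irqrestore(hrrq->lock, flags);
	return rc;			/* other queues were never blocked */
}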
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b8d759181611..f1eea5df54c6 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -552,7 +552,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 	struct ipr_trace_entry *trace_entry;
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 
-	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
+	trace_entry = &ioa_cfg->trace[atomic_add_return
+		(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
 	trace_entry->time = jiffies;
 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
 	trace_entry->type = type;
@@ -563,6 +564,7 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
 	trace_entry->u.add_data = add_data;
+	wmb();
 }
 #else
 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
@@ -697,9 +699,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
 					  u32 clr_ints)
 {
 	volatile u32 int_reg;
+	int i;
 
 	/* Stop new interrupts */
-	ioa_cfg->allow_interrupts = 0;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		spin_lock(&ioa_cfg->hrrq[i]._lock);
+		ioa_cfg->hrrq[i].allow_interrupts = 0;
+		spin_unlock(&ioa_cfg->hrrq[i]._lock);
+	}
+	wmb();
 
 	/* Set interrupt mask to stop all new interrupts */
 	if (ioa_cfg->sis64)
@@ -818,6 +826,7 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
 
 	ENTER;
 	for_each_hrrq(hrrq, ioa_cfg) {
+		spin_lock(&hrrq->_lock);
 		list_for_each_entry_safe(ipr_cmd,
 					temp, &hrrq->hrrq_pending_q, queue) {
 			list_del(&ipr_cmd->queue);
@@ -837,6 +846,7 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
 			del_timer(&ipr_cmd->timer);
 			ipr_cmd->done(ipr_cmd);
 		}
+		spin_unlock(&hrrq->_lock);
 	}
 	LEAVE;
 }
@@ -991,12 +1001,9 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
 {
 	if (ioa_cfg->hrrq_num == 1)
-		ioa_cfg->hrrq_index = 0;
-	else {
-		if (++ioa_cfg->hrrq_index >= ioa_cfg->hrrq_num)
-			ioa_cfg->hrrq_index = 1;
-	}
-	return ioa_cfg->hrrq_index;
+		return 0;
+	else
+		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
 }
 
 /**
@@ -1018,7 +1025,7 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioarcb *ioarcb;
 
-	if (ioa_cfg->allow_cmds) {
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
@@ -2564,7 +2571,7 @@ static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
 
 	/* If we got hit with a host reset while we were already resetting
 	 the adapter for some reason, and the reset failed. */
-	if (ioa_cfg->ioa_is_dead) {
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
 		ipr_trace;
 		return FAILED;
 	}
@@ -3205,7 +3212,8 @@ static void ipr_worker_thread(struct work_struct *work)
 restart:
 	do {
 		did_work = 0;
-		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
+		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
+		    !ioa_cfg->allow_ml_add_del) {
 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 			return;
 		}
@@ -3453,7 +3461,7 @@ static ssize_t ipr_show_adapter_state(struct device *dev,
 	int len;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	if (ioa_cfg->ioa_is_dead)
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
 		len = snprintf(buf, PAGE_SIZE, "offline\n");
 	else
 		len = snprintf(buf, PAGE_SIZE, "online\n");
@@ -3479,14 +3487,20 @@ static ssize_t ipr_store_adapter_state(struct device *dev,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 	unsigned long lock_flags;
-	int result = count;
+	int result = count, i;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
-		ioa_cfg->ioa_is_dead = 0;
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
+	    !strncmp(buf, "online", 6)) {
+		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+			spin_lock(&ioa_cfg->hrrq[i]._lock);
+			ioa_cfg->hrrq[i].ioa_is_dead = 0;
+			spin_unlock(&ioa_cfg->hrrq[i]._lock);
+		}
+		wmb();
 		ioa_cfg->reset_retries = 0;
 		ioa_cfg->in_ioa_bringdown = 0;
 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -4066,7 +4080,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 
 	ioa_cfg->dump = dump;
 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
-	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
 		ioa_cfg->dump_taken = 1;
 		schedule_work(&ioa_cfg->work_q);
 	}
@@ -4861,10 +4875,11 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 	 */
 	if (ioa_cfg->in_reset_reload)
 		return FAILED;
-	if (ioa_cfg->ioa_is_dead)
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
 		return FAILED;
 
 	for_each_hrrq(hrrq, ioa_cfg) {
+		spin_lock(&hrrq->_lock);
 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
 			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
 				if (ipr_cmd->scsi_cmd)
@@ -4878,6 +4893,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 				}
 			}
 		}
+		spin_unlock(&hrrq->_lock);
 	}
 	res->resetting_device = 1;
 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
@@ -4889,6 +4905,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 		spin_lock_irq(scsi_cmd->device->host->host_lock);
 
 		for_each_hrrq(hrrq, ioa_cfg) {
+			spin_lock(&hrrq->_lock);
 			list_for_each_entry(ipr_cmd,
 					    &hrrq->hrrq_pending_q, queue) {
 				if (ipr_cmd->ioarcb.res_handle ==
@@ -4897,6 +4914,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 					break;
 				}
 			}
+			spin_unlock(&hrrq->_lock);
 		}
 	} else
 		rc = ipr_device_reset(ioa_cfg, res);
@@ -5020,7 +5038,8 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
 	 * This will force the mid-layer to call ipr_eh_host_reset,
 	 * which will then go to sleep and wait for the reset to complete
 	 */
-	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
+	if (ioa_cfg->in_reset_reload ||
+	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
 		return FAILED;
 	if (!res)
 		return FAILED;
@@ -5036,6 +5055,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
 		return FAILED;
 
 	for_each_hrrq(hrrq, ioa_cfg) {
+		spin_lock(&hrrq->_lock);
 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
 			if (ipr_cmd->scsi_cmd == scsi_cmd) {
 				ipr_cmd->done = ipr_scsi_eh_done;
@@ -5043,6 +5063,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
 				break;
 			}
 		}
+		spin_unlock(&hrrq->_lock);
 	}
 
 	if (!op_found)
@@ -5112,6 +5133,7 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
 {
 	irqreturn_t rc = IRQ_HANDLED;
 	u32 int_mask_reg;
+
 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
 	int_reg &= ~int_mask_reg;
 
@@ -5173,6 +5195,7 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 	}
+
 	return rc;
 }
 
@@ -5205,7 +5228,7 @@ static int __ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue,
 	int num_hrrq = 0;
 
 	/* If interrupts are disabled, ignore the interrupt */
-	if (!ioa_cfg->allow_interrupts)
+	if (!hrr_queue->allow_interrupts)
 		return 0;
 
 	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
@@ -5252,7 +5275,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 {
 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
-	unsigned long lock_flags = 0;
+	unsigned long hrrq_flags = 0;
 	u32 int_reg = 0;
 	u32 ioasc;
 	u16 cmd_index;
@@ -5262,10 +5285,10 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	irqreturn_t rc = IRQ_NONE;
 	LIST_HEAD(doneq);
 
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	spin_lock_irqsave(hrrq->lock, hrrq_flags);
 	/* If interrupts are disabled, ignore the interrupt */
-	if (!ioa_cfg->allow_interrupts) {
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	if (!hrrq->allow_interrupts) {
+		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 		return IRQ_NONE;
 	}
 
@@ -5332,7 +5355,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
 
 unlock_out:
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
 		list_del(&ipr_cmd->queue);
 		del_timer(&ipr_cmd->timer);
@@ -5352,17 +5375,16 @@ unlock_out:
 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 {
 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
-	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
-	unsigned long lock_flags = 0;
+	unsigned long hrrq_flags = 0;
 	struct ipr_cmnd *ipr_cmd, *temp;
 	irqreturn_t rc = IRQ_NONE;
 	LIST_HEAD(doneq);
 
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	spin_lock_irqsave(hrrq->lock, hrrq_flags);
 
 	/* If interrupts are disabled, ignore the interrupt */
-	if (!ioa_cfg->allow_interrupts) {
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	if (!hrrq->allow_interrupts) {
+		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 		return IRQ_NONE;
 	}
 
@@ -5372,7 +5394,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 	if (__ipr_process_hrrq(hrrq, &doneq))
 		rc = IRQ_HANDLED;
 
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 
 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
 		list_del(&ipr_cmd->queue);
@@ -5965,14 +5987,14 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
 		scsi_dma_unmap(scsi_cmd);
 
-		spin_lock_irqsave(ioa_cfg->host->host_lock, hrrq_flags);
+		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, hrrq_flags);
+		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
 	} else {
-		spin_lock_irqsave(ioa_cfg->host->host_lock, hrrq_flags);
+		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
 		ipr_erp_start(ioa_cfg, ipr_cmd);
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, hrrq_flags);
+		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
 	}
 }
 
@@ -5995,26 +6017,34 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 	struct ipr_resource_entry *res;
 	struct ipr_ioarcb *ioarcb;
 	struct ipr_cmnd *ipr_cmd;
-	unsigned long lock_flags;
+	unsigned long hrrq_flags, lock_flags;
 	int rc;
 	struct ipr_hrr_queue *hrrq;
 	int hrrq_id;
 
 	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 
-	spin_lock_irqsave(shost->host_lock, lock_flags);
 	scsi_cmd->result = (DID_OK << 16);
 	res = scsi_cmd->device->hostdata;
+
+	if (ipr_is_gata(res) && res->sata_port) {
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		return rc;
+	}
+
 	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
 	hrrq = &ioa_cfg->hrrq[hrrq_id];
 
+	spin_lock_irqsave(hrrq->lock, hrrq_flags);
 	/*
 	 * We are currently blocking all devices due to a host reset
 	 * We have told the host to stop giving us new requests, but
 	 * ERP ops don't count. FIXME
 	 */
-	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
-		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
+		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
@@ -6022,23 +6052,17 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 	 * FIXME - Create scsi_set_host_offline interface
 	 * and the ioa_is_dead check can be removed
 	 */
-	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
-		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+	if (unlikely(hrrq->ioa_is_dead || !res)) {
+		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 		goto err_nodev;
 	}
 
-	if (ipr_is_gata(res) && res->sata_port) {
-		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
-		spin_unlock_irqrestore(shost->host_lock, lock_flags);
-		return rc;
-	}
-
 	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
 	if (ipr_cmd == NULL) {
-		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
-	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 
 	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
 	ioarcb = &ipr_cmd->ioarcb;
@@ -6068,18 +6092,18 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 	else
 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 
-	spin_lock_irqsave(shost->host_lock, lock_flags);
-	if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
+	spin_lock_irqsave(hrrq->lock, hrrq_flags);
+	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
-		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 		if (!rc)
 			scsi_dma_unmap(scsi_cmd);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
-	if (unlikely(ioa_cfg->ioa_is_dead)) {
+	if (unlikely(hrrq->ioa_is_dead)) {
 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
-		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 		scsi_dma_unmap(scsi_cmd);
 		goto err_nodev;
 	}
@@ -6092,15 +6116,15 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 	ipr_send_command(ipr_cmd);
-	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 	return 0;
 
 err_nodev:
-	spin_lock_irqsave(shost->host_lock, lock_flags);
+	spin_lock_irqsave(hrrq->lock, hrrq_flags);
 	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 	scsi_cmd->result = (DID_NO_CONNECT << 16);
 	scsi_cmd->scsi_done(scsi_cmd);
-	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 	return 0;
 }
 
@@ -6198,7 +6222,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 	}
 
-	if (!ioa_cfg->allow_cmds)
+	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
 		goto out_unlock;
 
 	rc = ipr_device_reset(ioa_cfg, res);
@@ -6240,12 +6264,14 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
 	}
 
 	for_each_hrrq(hrrq, ioa_cfg) {
+		spin_lock(&hrrq->_lock);
 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
 			if (ipr_cmd->qc == qc) {
 				ipr_device_reset(ioa_cfg, sata_port->res);
 				break;
 			}
 		}
+		spin_unlock(&hrrq->_lock);
 	}
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
@@ -6294,6 +6320,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
 	struct ipr_resource_entry *res = sata_port->res;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
+	spin_lock(&ipr_cmd->hrrq->_lock);
 	if (ipr_cmd->ioa_cfg->sis64)
 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
 		       sizeof(struct ipr_ioasa_gata));
@@ -6310,6 +6337,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
 	else
 		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+	spin_unlock(&ipr_cmd->hrrq->_lock);
 	ata_qc_complete(qc);
 }
 
@@ -6405,6 +6433,48 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
 }
 
 /**
+ * ipr_qc_defer - Get a free ipr_cmd
+ * @qc:	queued command
+ *
+ * Return value:
+ *	0 if success
+ **/
+static int ipr_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ipr_sata_port *sata_port = ap->private_data;
+	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+	struct ipr_cmnd *ipr_cmd;
+	struct ipr_hrr_queue *hrrq;
+	int hrrq_id;
+
+	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+	hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+	qc->lldd_task = NULL;
+	spin_lock(&hrrq->_lock);
+	if (unlikely(hrrq->ioa_is_dead)) {
+		spin_unlock(&hrrq->_lock);
+		return 0;
+	}
+
+	if (unlikely(!hrrq->allow_cmds)) {
+		spin_unlock(&hrrq->_lock);
+		return ATA_DEFER_LINK;
+	}
+
+	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+	if (ipr_cmd == NULL) {
+		spin_unlock(&hrrq->_lock);
+		return ATA_DEFER_LINK;
+	}
+
+	qc->lldd_task = ipr_cmd;
+	spin_unlock(&hrrq->_lock);
+	return 0;
+}
+
+/**
  * ipr_qc_issue - Issue a SATA qc to a device
  * @qc:	queued command
  *
@@ -6420,15 +6490,23 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioarcb *ioarcb;
 	struct ipr_ioarcb_ata_regs *regs;
-	struct ipr_hrr_queue *hrrq;
-	int hrrq_id;
 
-	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
+	if (qc->lldd_task == NULL)
+		ipr_qc_defer(qc);
+
+	ipr_cmd = qc->lldd_task;
+	if (ipr_cmd == NULL)
 		return AC_ERR_SYSTEM;
 
-	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
-	hrrq = &ioa_cfg->hrrq[hrrq_id];
-	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+	qc->lldd_task = NULL;
+	spin_lock(&ipr_cmd->hrrq->_lock);
+	if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
+			ipr_cmd->hrrq->ioa_is_dead)) {
+		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+		spin_unlock(&ipr_cmd->hrrq->_lock);
+		return AC_ERR_SYSTEM;
+	}
+
 	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
 	ioarcb = &ipr_cmd->ioarcb;
 
@@ -6441,7 +6519,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
 	memset(regs, 0, sizeof(*regs));
 	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
 
-	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 	ipr_cmd->qc = qc;
 	ipr_cmd->done = ipr_sata_done;
 	ipr_cmd->ioarcb.res_handle = res->res_handle;
@@ -6481,10 +6559,12 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
 
 	default:
 		WARN_ON(1);
+		spin_unlock(&ipr_cmd->hrrq->_lock);
 		return AC_ERR_INVALID;
 	}
 
 	ipr_send_command(ipr_cmd);
+	spin_unlock(&ipr_cmd->hrrq->_lock);
 
 	return 0;
 }
@@ -6523,6 +6603,7 @@ static struct ata_port_operations ipr_sata_ops = {
 	.hardreset = ipr_sata_reset,
 	.post_internal_cmd = ipr_ata_post_internal,
 	.qc_prep = ata_noop_qc_prep,
+	.qc_defer = ipr_qc_defer,
 	.qc_issue = ipr_qc_issue,
 	.qc_fill_rtf = ipr_qc_fill_rtf,
 	.port_start = ata_sas_port_start,
@@ -6620,11 +6701,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_resource_entry *res;
 	struct ipr_hostrcb *hostrcb, *temp;
-	int i = 0;
+	int i = 0, j;
 
 	ENTER;
 	ioa_cfg->in_reset_reload = 0;
-	ioa_cfg->allow_cmds = 1;
+	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
+		spin_lock(&ioa_cfg->hrrq[j]._lock);
+		ioa_cfg->hrrq[j].allow_cmds = 1;
+		spin_unlock(&ioa_cfg->hrrq[j]._lock);
+	}
+	wmb();
 	ioa_cfg->reset_cmd = NULL;
 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
 
@@ -6655,7 +6741,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
 	scsi_unblock_requests(ioa_cfg->host);
 	spin_lock(ioa_cfg->host->host_lock);
 
-	if (!ioa_cfg->allow_cmds)
+	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
 		scsi_block_requests(ioa_cfg->host);
 
 	LEAVE;
@@ -7452,8 +7538,8 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
 
-	if (ioa_cfg->hrrq_index < ioa_cfg->hrrq_num) {
-		hrrq = &ioa_cfg->hrrq[ioa_cfg->hrrq_index];
+	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
+		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
 
 		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
@@ -7481,7 +7567,8 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
 			(sizeof(u32) * hrrq->size) & 0xff;
 
 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
-			ioarcb->cmd_pkt.cdb[9] = ioa_cfg->hrrq_index;
+			ioarcb->cmd_pkt.cdb[9] =
+				ioa_cfg->identify_hrrq_index;
 
 		if (ioa_cfg->sis64) {
 			ioarcb->cmd_pkt.cdb[10] =
@@ -7495,24 +7582,19 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
 		}
 
 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
-			ioarcb->cmd_pkt.cdb[14] = ioa_cfg->hrrq_index;
+			ioarcb->cmd_pkt.cdb[14] =
+				ioa_cfg->identify_hrrq_index;
 
 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
 			   IPR_INTERNAL_TIMEOUT);
 
-		if (++ioa_cfg->hrrq_index < ioa_cfg->hrrq_num)
+		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
 
 		LEAVE;
 		return IPR_RC_JOB_RETURN;
-
 	}
 
-	if (ioa_cfg->hrrq_num == 1)
-		ioa_cfg->hrrq_index = 0;
-	else
-		ioa_cfg->hrrq_index = 1;
-
 	LEAVE;
 	return IPR_RC_JOB_CONTINUE;
 }
@@ -7571,7 +7653,6 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
 	ipr_cmd->timer.expires = jiffies + timeout;
 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
 	add_timer(&ipr_cmd->timer);
-	LEAVE;
 }
 
 /**
@@ -7586,6 +7667,7 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
 	struct ipr_hrr_queue *hrrq;
 
 	for_each_hrrq(hrrq, ioa_cfg) {
+		spin_lock(&hrrq->_lock);
 		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
 
 		/* Initialize Host RRQ pointers */
@@ -7593,9 +7675,15 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
 		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
 		hrrq->hrrq_curr = hrrq->hrrq_start;
 		hrrq->toggle_bit = 1;
+		spin_unlock(&hrrq->_lock);
 	}
+	wmb();
 
-	ioa_cfg->hrrq_index = 0;
+	ioa_cfg->identify_hrrq_index = 0;
+	if (ioa_cfg->hrrq_num == 1)
+		atomic_set(&ioa_cfg->hrrq_index, 0);
+	else
+		atomic_set(&ioa_cfg->hrrq_index, 1);
 
 	/* Zero out config table */
 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
@@ -7673,12 +7761,18 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	volatile u32 int_reg;
 	volatile u64 maskval;
+	int i;
 
 	ENTER;
 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
 	ipr_init_ioa_mem(ioa_cfg);
 
-	ioa_cfg->allow_interrupts = 1;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		spin_lock(&ioa_cfg->hrrq[i]._lock);
+		ioa_cfg->hrrq[i].allow_interrupts = 1;
+		spin_unlock(&ioa_cfg->hrrq[i]._lock);
+	}
+	wmb();
 	if (ioa_cfg->sis64) {
 		/* Set the adapter to the correct endian mode. */
 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
@@ -8237,7 +8331,8 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
 	int rc = IPR_RC_JOB_CONTINUE;
 
 	ENTER;
-	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
+	if (shutdown_type != IPR_SHUTDOWN_NONE &&
+			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
@@ -8321,9 +8416,15 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
 				   enum ipr_shutdown_type shutdown_type)
 {
 	struct ipr_cmnd *ipr_cmd;
+	int i;
 
 	ioa_cfg->in_reset_reload = 1;
-	ioa_cfg->allow_cmds = 0;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		spin_lock(&ioa_cfg->hrrq[i]._lock);
+		ioa_cfg->hrrq[i].allow_cmds = 0;
+		spin_unlock(&ioa_cfg->hrrq[i]._lock);
+	}
+	wmb();
 	scsi_block_requests(ioa_cfg->host);
 
 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
@@ -8349,7 +8450,9 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
 				   enum ipr_shutdown_type shutdown_type)
 {
-	if (ioa_cfg->ioa_is_dead)
+	int i;
+
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
 		return;
 
 	if (ioa_cfg->in_reset_reload) {
@@ -8364,7 +8467,12 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8364 "IOA taken offline - error recovery failed\n"); 8467 "IOA taken offline - error recovery failed\n");
8365 8468
8366 ioa_cfg->reset_retries = 0; 8469 ioa_cfg->reset_retries = 0;
8367 ioa_cfg->ioa_is_dead = 1; 8470 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8471 spin_lock(&ioa_cfg->hrrq[i]._lock);
8472 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8473 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8474 }
8475 wmb();
8368 8476
8369 if (ioa_cfg->in_ioa_bringdown) { 8477 if (ioa_cfg->in_ioa_bringdown) {
8370 ioa_cfg->reset_cmd = NULL; 8478 ioa_cfg->reset_cmd = NULL;
@@ -8396,8 +8504,16 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
  */
 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
 {
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	int i;
+
 	/* Disallow new interrupts, avoid loop */
-	ipr_cmd->ioa_cfg->allow_interrupts = 0;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		spin_lock(&ioa_cfg->hrrq[i]._lock);
+		ioa_cfg->hrrq[i].allow_interrupts = 0;
+		spin_unlock(&ioa_cfg->hrrq[i]._lock);
+	}
+	wmb();
 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 	ipr_cmd->done = ipr_reset_ioa_job;
 	return IPR_RC_JOB_RETURN;
@@ -8455,13 +8571,19 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
 {
 	unsigned long flags = 0;
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+	int i;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
 		ioa_cfg->sdt_state = ABORT_DUMP;
 	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
 	ioa_cfg->in_ioa_bringdown = 1;
-	ioa_cfg->allow_cmds = 0;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		spin_lock(&ioa_cfg->hrrq[i]._lock);
+		ioa_cfg->hrrq[i].allow_cmds = 0;
+		spin_unlock(&ioa_cfg->hrrq[i]._lock);
+	}
+	wmb();
 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
@@ -8522,7 +8644,7 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 
-	if (ioa_cfg->ioa_is_dead) {
+	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
 		rc = -EIO;
 	} else if (ipr_invalid_adapter(ioa_cfg)) {
 		if (!ipr_testmode)
@@ -8629,10 +8751,13 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
 	} else
 		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
 
-	if (ioa_cfg->intr_flag == IPR_USE_MSI)
+	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
 		pci_disable_msi(pdev);
-	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
 		pci_disable_msix(pdev);
+		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+	}
 
 	iounmap(ioa_cfg->hdw_dma_regs);
 	pci_release_regions(pdev);
@@ -8800,6 +8925,11 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+		if (i == 0)
+			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+		else
+			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
 	}
 
 	if (ipr_alloc_cmd_blks(ioa_cfg))
@@ -9154,9 +9284,9 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
 
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	if (!ioa_cfg->msi_received) {
 		/* MSI test failed */
 		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
@@ -9189,6 +9319,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	void __iomem *ipr_regs;
 	int rc = PCIBIOS_SUCCESSFUL;
 	volatile u32 mask, uproc, interrupts;
+	unsigned long lock_flags;
 
 	ENTER;
 
@@ -9291,10 +9422,10 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	}
 
-	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
-			ipr_enable_msix(ioa_cfg) == 0)
-		ioa_cfg->intr_flag = IPR_USE_MSIX;
-	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
-			ipr_enable_msi(ioa_cfg) == 0)
-		ioa_cfg->intr_flag = IPR_USE_MSI;
+	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+	    ipr_enable_msix(ioa_cfg) == 0)
+		ioa_cfg->intr_flag = IPR_USE_MSIX;
+	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+	    ipr_enable_msi(ioa_cfg) == 0)
+		ioa_cfg->intr_flag = IPR_USE_MSI;
 	else {
 		ioa_cfg->intr_flag = IPR_USE_LSI;
@@ -9380,7 +9511,9 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
 		ioa_cfg->ioa_unit_checked = 1;
 
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
 	if (ioa_cfg->intr_flag == IPR_USE_MSI
 			|| ioa_cfg->intr_flag == IPR_USE_MSIX) {
@@ -9767,7 +9900,7 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
 
 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-		if (!ioa_cfg->allow_cmds) {
+		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 			continue;
 		}
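A closing note on the queue-selection hunk near the top of this diff (the ipr_get_hrrq_index rewrite): commands are spread over the additional queues without taking any lock, by letting an atomic counter pick the next queue. The sketch below is illustrative only; demo_pick_queue is a hypothetical stand-in for the driver's helper, and, as in the patch, queue 0 is kept out of the round-robin so it can be reserved for the initial/internal queue.

#include <linux/atomic.h>

static atomic_t demo_hrrq_index = ATOMIC_INIT(0);

/* Pick a completion queue for a new command without any locking. */
static int demo_pick_queue(int hrrq_num)
{
	if (hrrq_num == 1)
		return 0;
	/* atomic_add_return() hands every caller a distinct ticket;
	 * the modulo maps tickets onto queues 1..hrrq_num-1 round-robin. */
	return (atomic_add_return(1, &demo_hrrq_index) % (hrrq_num - 1)) + 1;
}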