author	wenxiong@linux.vnet.ibm.com <wenxiong@linux.vnet.ibm.com>	2013-01-11 18:43:50 -0500
committer	James Bottomley <JBottomley@Parallels.com>	2013-01-29 18:48:51 -0500
commit	05a6538a9a204999e0c0f7faee00b81b334f4fc7 (patch)
tree	6a26faca4b997d778f336d03b6aef2738f053f92
parent	b3b3b4070dc01f6b11cae6d5632cd0f428a81aab (diff)
[SCSI] ipr: Add support for MSI-X and distributed completion
The new generation of IBM SAS controllers supports MSI-X interrupts and Distributed Completion Processing. This patch adds support for both features to the ipr device driver.

Signed-off-by: Wen Xiong <wenxiong@linux.vnet.ibm.com>
Acked-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
-rw-r--r--	drivers/scsi/ipr.c	716
-rw-r--r--	drivers/scsi/ipr.h	70
2 files changed, 594 insertions(+), 192 deletions(-)
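Orientation for the diff below: the core of the change is that the single host request/response queue (HRRQ) becomes an array of per-vector queues, each with its own free and pending command lists. When more than one queue exists, queue 0 serves internal and initialization commands while normal I/O round-robins across queues 1..hrrq_num-1. A minimal C sketch of that selection policy (simplified from ipr_get_hrrq_index() in the diff; pick_hrrq_index and its cursor parameter are illustrative names, not driver symbols):

static int pick_hrrq_index(int *cursor, int hrrq_num)
{
	if (hrrq_num == 1)
		return 0;	/* single queue: all commands use queue 0 */
	if (++*cursor >= hrrq_num)
		*cursor = 1;	/* wrap to 1; queue 0 is reserved for internal commands */
	return *cursor;
}

The queue count is derived from the new number_of_msix module parameter (for example, loading with number_of_msix=4 on a capable adapter), clamped to IPR_MAX_MSIX_VECTORS and further limited by the number of online CPUs in ipr_probe_ioa().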
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index bd20639ec66c..b8d759181611 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -98,6 +98,7 @@ static unsigned int ipr_transop_timeout = 0;
 static unsigned int ipr_debug = 0;
 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
 static unsigned int ipr_dual_ioa_raid = 1;
+static unsigned int ipr_number_of_msix = 2;
 static DEFINE_SPINLOCK(ipr_driver_lock);
 
 /* This table describes the differences between DMA controller chips */
@@ -215,6 +216,8 @@ MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to e
 module_param_named(max_devs, ipr_max_devs, int, 0);
 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
 		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
+module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
+MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IPR_DRIVER_VERSION);
 
@@ -595,8 +598,11 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
+	int hrrq_id;
 
+	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
 	ioarcb->data_transfer_length = 0;
 	ioarcb->read_data_transfer_length = 0;
 	ioarcb->ioadl_len = 0;
@@ -646,12 +652,16 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
  *	pointer to ipr command struct
  **/
 static
-struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
 {
-	struct ipr_cmnd *ipr_cmd;
+	struct ipr_cmnd *ipr_cmd = NULL;
+
+	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
+		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
+			struct ipr_cmnd, queue);
+		list_del(&ipr_cmd->queue);
+	}
 
-	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
-	list_del(&ipr_cmd->queue);
 
 	return ipr_cmd;
 }
@@ -666,7 +676,8 @@ struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 static
 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
-	struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+	struct ipr_cmnd *ipr_cmd =
+		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
 	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
 	return ipr_cmd;
 }
@@ -761,13 +772,12 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
  **/
 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
 {
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ata_queued_cmd *qc = ipr_cmd->qc;
 	struct ipr_sata_port *sata_port = qc->ap->private_data;
 
 	qc->err_mask |= AC_ERR_OTHER;
 	sata_port->ioasa.status |= ATA_BUSY;
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	ata_qc_complete(qc);
 }
 
@@ -783,14 +793,13 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
  **/
 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 {
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 
 	scsi_cmd->result |= (DID_ERROR << 16);
 
 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	scsi_cmd->scsi_done(scsi_cmd);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 }
 
 /**
@@ -805,24 +814,30 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct ipr_cmnd *ipr_cmd, *temp;
+	struct ipr_hrr_queue *hrrq;
 
 	ENTER;
-	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
-		list_del(&ipr_cmd->queue);
+	for_each_hrrq(hrrq, ioa_cfg) {
+		list_for_each_entry_safe(ipr_cmd,
+					temp, &hrrq->hrrq_pending_q, queue) {
+			list_del(&ipr_cmd->queue);
 
-		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
-		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
+			ipr_cmd->s.ioasa.hdr.ioasc =
+				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+			ipr_cmd->s.ioasa.hdr.ilid =
+				cpu_to_be32(IPR_DRIVER_ILID);
 
-		if (ipr_cmd->scsi_cmd)
-			ipr_cmd->done = ipr_scsi_eh_done;
-		else if (ipr_cmd->qc)
-			ipr_cmd->done = ipr_sata_eh_done;
+			if (ipr_cmd->scsi_cmd)
+				ipr_cmd->done = ipr_scsi_eh_done;
+			else if (ipr_cmd->qc)
+				ipr_cmd->done = ipr_sata_eh_done;
 
-		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
-		del_timer(&ipr_cmd->timer);
-		ipr_cmd->done(ipr_cmd);
+			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
+				     IPR_IOASC_IOA_WAS_RESET);
+			del_timer(&ipr_cmd->timer);
+			ipr_cmd->done(ipr_cmd);
+		}
 	}
-
 	LEAVE;
 }
 
@@ -872,9 +887,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
 		       void (*done) (struct ipr_cmnd *),
 		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
 {
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 
 	ipr_cmd->done = done;
 
@@ -975,6 +988,17 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
 	spin_lock_irq(ioa_cfg->host->host_lock);
 }
 
+static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+{
+	if (ioa_cfg->hrrq_num == 1)
+		ioa_cfg->hrrq_index = 0;
+	else {
+		if (++ioa_cfg->hrrq_index >= ioa_cfg->hrrq_num)
+			ioa_cfg->hrrq_index = 1;
+	}
+	return ioa_cfg->hrrq_index;
+}
+
 /**
  * ipr_send_hcam - Send an HCAM to the adapter.
  * @ioa_cfg:	ioa config struct
@@ -996,7 +1020,7 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
 
 	if (ioa_cfg->allow_cmds) {
 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
-		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
 
 		ipr_cmd->u.hostrcb = hostrcb;
@@ -1385,7 +1409,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 	list_del(&hostrcb->queue);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 
 	if (ioasc) {
 		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
@@ -2437,7 +2461,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
 	fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 
 	list_del(&hostrcb->queue);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 
 	if (!ioasc) {
 		ipr_handle_log_data(ioa_cfg, hostrcb);
@@ -4751,7 +4775,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
 
 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
 		if (ipr_cmd->ioa_cfg->sis64)
 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
@@ -4821,6 +4845,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 	struct ipr_resource_entry *res;
 	struct ata_port *ap;
 	int rc = 0;
+	struct ipr_hrr_queue *hrrq;
 
 	ENTER;
 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -4839,19 +4864,21 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 	if (ioa_cfg->ioa_is_dead)
 		return FAILED;
 
-	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
-			if (ipr_cmd->scsi_cmd)
-				ipr_cmd->done = ipr_scsi_eh_done;
-			if (ipr_cmd->qc)
-				ipr_cmd->done = ipr_sata_eh_done;
-			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
-				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
-				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+	for_each_hrrq(hrrq, ioa_cfg) {
+		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
+				if (ipr_cmd->scsi_cmd)
+					ipr_cmd->done = ipr_scsi_eh_done;
+				if (ipr_cmd->qc)
+					ipr_cmd->done = ipr_sata_eh_done;
+				if (ipr_cmd->qc &&
+				    !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+				}
 			}
 		}
 	}
-
 	res->resetting_device = 1;
 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
 
@@ -4861,10 +4888,14 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 		ata_std_error_handler(ap);
 		spin_lock_irq(scsi_cmd->device->host->host_lock);
 
-		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
-				rc = -EIO;
-				break;
+		for_each_hrrq(hrrq, ioa_cfg) {
+			list_for_each_entry(ipr_cmd,
+					    &hrrq->hrrq_pending_q, queue) {
+				if (ipr_cmd->ioarcb.res_handle ==
+				    res->res_handle) {
+					rc = -EIO;
+					break;
+				}
 			}
 		}
 	} else
@@ -4918,7 +4949,7 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
 	else
 		ipr_cmd->sibling->done(ipr_cmd->sibling);
 
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	LEAVE;
 }
 
@@ -4979,6 +5010,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
 	struct ipr_cmd_pkt *cmd_pkt;
 	u32 ioasc, int_reg;
 	int op_found = 0;
+	struct ipr_hrr_queue *hrrq;
 
 	ENTER;
 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
@@ -5003,11 +5035,13 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
 	if (!ipr_is_gscsi(res))
 		return FAILED;
 
-	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-		if (ipr_cmd->scsi_cmd == scsi_cmd) {
-			ipr_cmd->done = ipr_scsi_eh_done;
-			op_found = 1;
-			break;
+	for_each_hrrq(hrrq, ioa_cfg) {
+		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+			if (ipr_cmd->scsi_cmd == scsi_cmd) {
+				ipr_cmd->done = ipr_scsi_eh_done;
+				op_found = 1;
+				break;
+			}
 		}
 	}
 
@@ -5035,7 +5069,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
 		ipr_trace;
 	}
 
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
 	if (!ipr_is_naca_model(res))
 		res->needs_sync_complete = 1;
 
@@ -5078,7 +5112,6 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
 {
 	irqreturn_t rc = IRQ_HANDLED;
 	u32 int_mask_reg;
-
 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
 	int_reg &= ~int_mask_reg;
 
@@ -5127,6 +5160,9 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
 	} else {
 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
 			ioa_cfg->ioa_unit_checked = 1;
+		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
+			dev_err(&ioa_cfg->pdev->dev,
+				"No Host RRQ. 0x%08X\n", int_reg);
 		else
 			dev_err(&ioa_cfg->pdev->dev,
 				"Permanent IOA failure. 0x%08X\n", int_reg);
@@ -5137,7 +5173,6 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 	}
-
 	return rc;
 }
 
@@ -5149,10 +5184,10 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
  * Return value:
  *	none
  **/
-static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
+static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
 {
 	ioa_cfg->errors_logged++;
-	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
+	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
 
 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
 		ioa_cfg->sdt_state = GET_DUMP;
@@ -5160,6 +5195,51 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 }
 
+static int __ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue,
+						struct list_head *doneq)
+{
+	u32 ioasc;
+	u16 cmd_index;
+	struct ipr_cmnd *ipr_cmd;
+	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
+	int num_hrrq = 0;
+
+	/* If interrupts are disabled, ignore the interrupt */
+	if (!ioa_cfg->allow_interrupts)
+		return 0;
+
+	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+	       hrr_queue->toggle_bit) {
+
+		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
+			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
+			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
+
+		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
+			     cmd_index < hrr_queue->min_cmd_id)) {
+			ipr_isr_eh(ioa_cfg,
+				"Invalid response handle from IOA: ",
+				cmd_index);
+			break;
+		}
+
+		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
+		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
+
+		list_move_tail(&ipr_cmd->queue, doneq);
+
+		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
+			hrr_queue->hrrq_curr++;
+		} else {
+			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
+			hrr_queue->toggle_bit ^= 1u;
+		}
+		num_hrrq++;
+	}
+	return num_hrrq;
+}
 /**
  * ipr_isr - Interrupt service routine
  * @irq:	irq number
@@ -5170,7 +5250,8 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
  **/
 static irqreturn_t ipr_isr(int irq, void *devp)
 {
-	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
+	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
 	unsigned long lock_flags = 0;
 	u32 int_reg = 0;
 	u32 ioasc;
@@ -5182,7 +5263,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	LIST_HEAD(doneq);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-
 	/* If interrupts are disabled, ignore the interrupt */
 	if (!ioa_cfg->allow_interrupts) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -5192,20 +5272,22 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	while (1) {
 		ipr_cmd = NULL;
 
-		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
-		       ioa_cfg->toggle_bit) {
+		while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+		       hrrq->toggle_bit) {
 
-			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
-				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
+			cmd_index = (be32_to_cpu(*hrrq->hrrq_curr) &
+				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
 
-			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
-				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
+			if (unlikely(cmd_index > hrrq->max_cmd_id ||
+				     cmd_index < hrrq->min_cmd_id)) {
+				ipr_isr_eh(ioa_cfg,
+					"Invalid response handle from IOA: ",
+					cmd_index);
 				rc = IRQ_HANDLED;
 				goto unlock_out;
 			}
 
 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
-
 			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
@@ -5214,11 +5296,11 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
 			rc = IRQ_HANDLED;
 
-			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
-				ioa_cfg->hrrq_curr++;
+			if (hrrq->hrrq_curr < hrrq->hrrq_end) {
+				hrrq->hrrq_curr++;
 			} else {
-				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
-				ioa_cfg->toggle_bit ^= 1u;
+				hrrq->hrrq_curr = hrrq->hrrq_start;
+				hrrq->toggle_bit ^= 1u;
 			}
 		}
 
@@ -5239,7 +5321,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 			irq_none++;
 		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
 			   int_reg & IPR_PCII_HRRQ_UPDATED) {
-			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ: ", num_hrrq);
 			rc = IRQ_HANDLED;
 			goto unlock_out;
 		} else
@@ -5256,7 +5338,47 @@ unlock_out:
 		del_timer(&ipr_cmd->timer);
 		ipr_cmd->fast_done(ipr_cmd);
 	}
+	return rc;
+}
+
+/**
+ * ipr_isr_mhrrq - Interrupt service routine
+ * @irq:	irq number
+ * @devp:	pointer to ioa config struct
+ *
+ * Return value:
+ *	IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
+{
+	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+	unsigned long lock_flags = 0;
+	struct ipr_cmnd *ipr_cmd, *temp;
+	irqreturn_t rc = IRQ_NONE;
+	LIST_HEAD(doneq);
 
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+	/* If interrupts are disabled, ignore the interrupt */
+	if (!ioa_cfg->allow_interrupts) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		return IRQ_NONE;
+	}
+
+	if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+	    hrrq->toggle_bit)
+
+		if (__ipr_process_hrrq(hrrq, &doneq))
+			rc = IRQ_HANDLED;
+
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+		list_del(&ipr_cmd->queue);
+		del_timer(&ipr_cmd->timer);
+		ipr_cmd->fast_done(ipr_cmd);
+	}
 	return rc;
 }
 
@@ -5416,7 +5538,6 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
 {
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
@@ -5434,7 +5555,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
 		res->in_erp = 0;
 	}
 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
 
@@ -5818,7 +5939,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 	}
 
 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
 
@@ -5837,21 +5958,21 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-	unsigned long lock_flags;
+	unsigned long hrrq_flags;
 
 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
 		scsi_dma_unmap(scsi_cmd);
 
-		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, hrrq_flags);
+		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, hrrq_flags);
 	} else {
-		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, hrrq_flags);
 		ipr_erp_start(ioa_cfg, ipr_cmd);
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, hrrq_flags);
 	}
 }
 
@@ -5876,12 +5997,16 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 	struct ipr_cmnd *ipr_cmd;
 	unsigned long lock_flags;
 	int rc;
+	struct ipr_hrr_queue *hrrq;
+	int hrrq_id;
 
 	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 
 	spin_lock_irqsave(shost->host_lock, lock_flags);
 	scsi_cmd->result = (DID_OK << 16);
 	res = scsi_cmd->device->hostdata;
+	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+	hrrq = &ioa_cfg->hrrq[hrrq_id];
 
 	/*
 	 * We are currently blocking all devices due to a host reset
@@ -5908,7 +6033,11 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 		return rc;
 	}
 
-	ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+	if (ipr_cmd == NULL) {
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 
 	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
@@ -5930,8 +6059,9 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 	}
 
 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
-	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
+	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+	}
 
 	if (ioa_cfg->sis64)
 		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
@@ -5940,7 +6070,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 
 	spin_lock_irqsave(shost->host_lock, lock_flags);
 	if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
-		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
 		spin_unlock_irqrestore(shost->host_lock, lock_flags);
 		if (!rc)
 			scsi_dma_unmap(scsi_cmd);
@@ -5948,7 +6078,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 	}
 
 	if (unlikely(ioa_cfg->ioa_is_dead)) {
-		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
 		spin_unlock_irqrestore(shost->host_lock, lock_flags);
 		scsi_dma_unmap(scsi_cmd);
 		goto err_nodev;
@@ -5959,7 +6089,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
 		res->needs_sync_complete = 0;
 	}
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 	ipr_send_command(ipr_cmd);
 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -6099,6 +6229,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
 	struct ipr_sata_port *sata_port = qc->ap->private_data;
 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
 	struct ipr_cmnd *ipr_cmd;
+	struct ipr_hrr_queue *hrrq;
 	unsigned long flags;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6108,10 +6239,12 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 	}
 
-	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-		if (ipr_cmd->qc == qc) {
-			ipr_device_reset(ioa_cfg, sata_port->res);
-			break;
+	for_each_hrrq(hrrq, ioa_cfg) {
+		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+			if (ipr_cmd->qc == qc) {
+				ipr_device_reset(ioa_cfg, sata_port->res);
+				break;
+			}
 		}
 	}
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
@@ -6176,7 +6309,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
 		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
 	else
 		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	ata_qc_complete(qc);
 }
 
@@ -6287,11 +6420,16 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioarcb *ioarcb;
 	struct ipr_ioarcb_ata_regs *regs;
+	struct ipr_hrr_queue *hrrq;
+	int hrrq_id;
 
 	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
 		return AC_ERR_SYSTEM;
 
-	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+	hrrq = &ioa_cfg->hrrq[hrrq_id];
+	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
 	ioarcb = &ipr_cmd->ioarcb;
 
 	if (ioa_cfg->sis64) {
@@ -6303,7 +6441,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
 	memset(regs, 0, sizeof(*regs));
 	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
 
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
 	ipr_cmd->qc = qc;
 	ipr_cmd->done = ipr_sata_done;
 	ipr_cmd->ioarcb.res_handle = res->res_handle;
@@ -6455,7 +6593,7 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
 	ENTER;
 	ioa_cfg->in_reset_reload = 0;
 	ioa_cfg->reset_retries = 0;
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	wake_up_all(&ioa_cfg->reset_wait_q);
 
 	spin_unlock_irq(ioa_cfg->host->host_lock);
@@ -6510,7 +6648,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
 
 	ioa_cfg->reset_retries = 0;
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	wake_up_all(&ioa_cfg->reset_wait_q);
 
 	spin_unlock(ioa_cfg->host->host_lock);
@@ -6588,9 +6726,11 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
 
 		if (!ioa_cfg->sis64)
 			ipr_cmd->job_step = ipr_set_supported_devs;
+		LEAVE;
 		return IPR_RC_JOB_RETURN;
 	}
 
+	LEAVE;
 	return IPR_RC_JOB_CONTINUE;
 }
 
@@ -6848,7 +6988,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
 
 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	return IPR_RC_JOB_RETURN;
 }
 
@@ -7306,46 +7446,75 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+	struct ipr_hrr_queue *hrrq;
 
 	ENTER;
+	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
 	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
 
-	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
-	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
-
-	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
-	if (ioa_cfg->sis64)
-		ioarcb->cmd_pkt.cdb[1] = 0x1;
-	ioarcb->cmd_pkt.cdb[2] =
-		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
-	ioarcb->cmd_pkt.cdb[3] =
-		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
-	ioarcb->cmd_pkt.cdb[4] =
-		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
-	ioarcb->cmd_pkt.cdb[5] =
-		((u64) ioa_cfg->host_rrq_dma) & 0xff;
-	ioarcb->cmd_pkt.cdb[7] =
-		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
-	ioarcb->cmd_pkt.cdb[8] =
-		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
-
-	if (ioa_cfg->sis64) {
-		ioarcb->cmd_pkt.cdb[10] =
-			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
-		ioarcb->cmd_pkt.cdb[11] =
-			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
-		ioarcb->cmd_pkt.cdb[12] =
-			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
-		ioarcb->cmd_pkt.cdb[13] =
-			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
-	}
-
-	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
-
-	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+	if (ioa_cfg->hrrq_index < ioa_cfg->hrrq_num) {
+		hrrq = &ioa_cfg->hrrq[ioa_cfg->hrrq_index];
+
+		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
+		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+
+		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+		if (ioa_cfg->sis64)
+			ioarcb->cmd_pkt.cdb[1] = 0x1;
+
+		if (ioa_cfg->nvectors == 1)
+			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
+		else
+			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
+
+		ioarcb->cmd_pkt.cdb[2] =
+			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
+		ioarcb->cmd_pkt.cdb[3] =
+			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
+		ioarcb->cmd_pkt.cdb[4] =
+			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
+		ioarcb->cmd_pkt.cdb[5] =
+			((u64) hrrq->host_rrq_dma) & 0xff;
+		ioarcb->cmd_pkt.cdb[7] =
+			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
+		ioarcb->cmd_pkt.cdb[8] =
+			(sizeof(u32) * hrrq->size) & 0xff;
+
+		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+			ioarcb->cmd_pkt.cdb[9] = ioa_cfg->hrrq_index;
+
+		if (ioa_cfg->sis64) {
+			ioarcb->cmd_pkt.cdb[10] =
+				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
+			ioarcb->cmd_pkt.cdb[11] =
+				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
+			ioarcb->cmd_pkt.cdb[12] =
+				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
+			ioarcb->cmd_pkt.cdb[13] =
+				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
+		}
+
+		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+			ioarcb->cmd_pkt.cdb[14] = ioa_cfg->hrrq_index;
+
+		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+			   IPR_INTERNAL_TIMEOUT);
+
+		if (++ioa_cfg->hrrq_index < ioa_cfg->hrrq_num)
+			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+
+		LEAVE;
+		return IPR_RC_JOB_RETURN;
+
+	}
+
+	if (ioa_cfg->hrrq_num == 1)
+		ioa_cfg->hrrq_index = 0;
+	else
+		ioa_cfg->hrrq_index = 1;
 
 	LEAVE;
-	return IPR_RC_JOB_RETURN;
+	return IPR_RC_JOB_CONTINUE;
 }
 
 /**
@@ -7393,13 +7562,16 @@ static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
 				  unsigned long timeout)
 {
-	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+
+	ENTER;
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 	ipr_cmd->done = ipr_reset_ioa_job;
 
 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
 	ipr_cmd->timer.expires = jiffies + timeout;
 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
 	add_timer(&ipr_cmd->timer);
+	LEAVE;
 }
 
 /**
@@ -7411,13 +7583,19 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
  **/
 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
 {
-	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
+	struct ipr_hrr_queue *hrrq;
 
-	/* Initialize Host RRQ pointers */
-	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
-	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
-	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
-	ioa_cfg->toggle_bit = 1;
+	for_each_hrrq(hrrq, ioa_cfg) {
+		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
+
+		/* Initialize Host RRQ pointers */
+		hrrq->hrrq_start = hrrq->host_rrq;
+		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
+		hrrq->hrrq_curr = hrrq->hrrq_start;
+		hrrq->toggle_bit = 1;
+	}
+
+	ioa_cfg->hrrq_index = 0;
 
 	/* Zero out config table */
 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
@@ -7474,7 +7652,8 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
 	ipr_cmd->done = ipr_reset_ioa_job;
 	add_timer(&ipr_cmd->timer);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 
 	return IPR_RC_JOB_RETURN;
 }
@@ -7539,7 +7718,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
 	ipr_cmd->done = ipr_reset_ioa_job;
 	add_timer(&ipr_cmd->timer);
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 
 	LEAVE;
 	return IPR_RC_JOB_RETURN;
@@ -8106,7 +8285,8 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
 			 * We are doing nested adapter resets and this is
 			 * not the current reset job.
 			 */
-			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+			list_add_tail(&ipr_cmd->queue,
+					&ipr_cmd->hrrq->hrrq_free_q);
 			return;
 		}
 
@@ -8218,7 +8398,7 @@ static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
 {
 	/* Disallow new interrupts, avoid loop */
 	ipr_cmd->ioa_cfg->allow_interrupts = 0;
-	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 	ipr_cmd->done = ipr_reset_ioa_job;
 	return IPR_RC_JOB_RETURN;
 }
@@ -8338,7 +8518,6 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
 	} else
 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
 					IPR_SHUTDOWN_NONE);
-
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -8404,8 +8583,13 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
 	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
 	ipr_free_cmd_blks(ioa_cfg);
-	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
-			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+
+	for (i = 0; i < ioa_cfg->hrrq_num; i++)
+		pci_free_consistent(ioa_cfg->pdev,
+				    sizeof(u32) * ioa_cfg->hrrq[i].size,
+				    ioa_cfg->hrrq[i].host_rrq,
+				    ioa_cfg->hrrq[i].host_rrq_dma);
+
 	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
 			    ioa_cfg->u.cfg_table,
 			    ioa_cfg->cfg_table_dma);
@@ -8436,8 +8620,20 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
 	struct pci_dev *pdev = ioa_cfg->pdev;
 
 	ENTER;
-	free_irq(pdev->irq, ioa_cfg);
-	pci_disable_msi(pdev);
+	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
+		int i;
+		for (i = 0; i < ioa_cfg->nvectors; i++)
+			free_irq(ioa_cfg->vectors_info[i].vec,
+				&ioa_cfg->hrrq[i]);
+	} else
+		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
+
+	if (ioa_cfg->intr_flag == IPR_USE_MSI)
+		pci_disable_msi(pdev);
+	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+		pci_disable_msix(pdev);
+
 	iounmap(ioa_cfg->hdw_dma_regs);
 	pci_release_regions(pdev);
 	ipr_free_mem(ioa_cfg);
@@ -8458,7 +8654,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioarcb *ioarcb;
 	dma_addr_t dma_addr;
-	int i;
+	int i, entries_each_hrrq, hrrq_id = 0;
 
 	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
 						sizeof(struct ipr_cmnd), 512, 0);
@@ -8474,6 +8670,41 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 		return -ENOMEM;
 	}
 
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		if (ioa_cfg->hrrq_num > 1) {
+			if (i == 0) {
+				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
+				ioa_cfg->hrrq[i].min_cmd_id = 0;
+				ioa_cfg->hrrq[i].max_cmd_id =
+					(entries_each_hrrq - 1);
+			} else {
+				entries_each_hrrq =
+					IPR_NUM_BASE_CMD_BLKS /
+					(ioa_cfg->hrrq_num - 1);
+				ioa_cfg->hrrq[i].min_cmd_id =
+					IPR_NUM_INTERNAL_CMD_BLKS +
+					(i - 1) * entries_each_hrrq;
+				ioa_cfg->hrrq[i].max_cmd_id =
+					(IPR_NUM_INTERNAL_CMD_BLKS +
+					i * entries_each_hrrq - 1);
+			}
+		} else {
+			entries_each_hrrq = IPR_NUM_CMD_BLKS;
+			ioa_cfg->hrrq[i].min_cmd_id = 0;
+			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
+		}
+		ioa_cfg->hrrq[i].size = entries_each_hrrq;
+	}
+
+	BUG_ON(ioa_cfg->hrrq_num == 0);
+
+	i = IPR_NUM_CMD_BLKS -
+		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
+	if (i > 0) {
+		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
+		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
+	}
+
 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
 		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 
@@ -8512,7 +8743,11 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 		ipr_cmd->sense_buffer_dma = dma_addr +
 			offsetof(struct ipr_cmnd, sense_buffer);
 
-		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
+		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
+		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
+			hrrq_id++;
 	}
 
 	return 0;
@@ -8562,15 +8797,29 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
 	if (!ioa_cfg->vpd_cbs)
 		goto out_free_res_entries;
 
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+	}
+
 	if (ipr_alloc_cmd_blks(ioa_cfg))
 		goto out_free_vpd_cbs;
 
-	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
-						 sizeof(u32) * IPR_NUM_CMD_BLKS,
-						 &ioa_cfg->host_rrq_dma);
-
-	if (!ioa_cfg->host_rrq)
-		goto out_ipr_free_cmd_blocks;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
+					sizeof(u32) * ioa_cfg->hrrq[i].size,
+					&ioa_cfg->hrrq[i].host_rrq_dma);
+
+		if (!ioa_cfg->hrrq[i].host_rrq) {
+			while (--i > 0)
+				pci_free_consistent(pdev,
+					sizeof(u32) * ioa_cfg->hrrq[i].size,
+					ioa_cfg->hrrq[i].host_rrq,
+					ioa_cfg->hrrq[i].host_rrq_dma);
+			goto out_ipr_free_cmd_blocks;
+		}
+		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
+	}
 
 	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
 						    ioa_cfg->cfg_table_size,
@@ -8614,8 +8863,12 @@ out_free_hostrcb_dma:
 			    ioa_cfg->u.cfg_table,
 			    ioa_cfg->cfg_table_dma);
 out_free_host_rrq:
-	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
-			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		pci_free_consistent(pdev,
+				    sizeof(u32) * ioa_cfg->hrrq[i].size,
+				    ioa_cfg->hrrq[i].host_rrq,
+				    ioa_cfg->hrrq[i].host_rrq_dma);
+	}
 out_ipr_free_cmd_blocks:
 	ipr_free_cmd_blks(ioa_cfg);
 out_free_vpd_cbs:
@@ -8673,15 +8926,11 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
 	ioa_cfg->doorbell = IPR_DOORBELL;
 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
-	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
-	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
 
-	INIT_LIST_HEAD(&ioa_cfg->free_q);
-	INIT_LIST_HEAD(&ioa_cfg->pending_q);
 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
@@ -8759,6 +9008,88 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
 	return NULL;
 }
 
+static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
+{
+	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
+	int i, err, vectors;
+
+	for (i = 0; i < ARRAY_SIZE(entries); ++i)
+		entries[i].entry = i;
+
+	vectors = ipr_number_of_msix;
+
+	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
+		vectors = err;
+
+	if (err < 0) {
+		pci_disable_msix(ioa_cfg->pdev);
+		return err;
+	}
+
+	if (!err) {
+		for (i = 0; i < vectors; i++)
+			ioa_cfg->vectors_info[i].vec = entries[i].vector;
+		ioa_cfg->nvectors = vectors;
+	}
+
+	return err;
+}
+
+static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
+{
+	int i, err, vectors;
+
+	vectors = ipr_number_of_msix;
+
+	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
+		vectors = err;
+
+	if (err < 0) {
+		pci_disable_msi(ioa_cfg->pdev);
+		return err;
+	}
+
+	if (!err) {
+		for (i = 0; i < vectors; i++)
+			ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+		ioa_cfg->nvectors = vectors;
+	}
+
+	return err;
+}
+
+static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
+{
+	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
+
+	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
+		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
+			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
+		ioa_cfg->vectors_info[vec_idx].
+			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
+	}
+}
+
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+{
+	int i, rc;
+
+	for (i = 1; i < ioa_cfg->nvectors; i++) {
+		rc = request_irq(ioa_cfg->vectors_info[i].vec,
+			ipr_isr_mhrrq,
+			0,
+			ioa_cfg->vectors_info[i].desc,
+			&ioa_cfg->hrrq[i]);
+		if (rc) {
+			while (--i >= 0)
+				free_irq(ioa_cfg->vectors_info[i].vec,
+					&ioa_cfg->hrrq[i]);
+			return rc;
+		}
+	}
+	return 0;
+}
+
 /**
  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
  * @pdev:	PCI device struct
@@ -8775,6 +9106,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
 	unsigned long lock_flags = 0;
 	irqreturn_t rc = IRQ_HANDLED;
 
+	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
 	ioa_cfg->msi_received = 1;
@@ -8841,8 +9173,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
 	return rc;
 }
 
-/**
- * ipr_probe_ioa - Allocates memory and does first stage of initialization
+ /* ipr_probe_ioa - Allocates memory and does first stage of initialization
  * @pdev:	PCI device struct
  * @dev_id:	PCI device id struct
  *
@@ -8953,17 +9284,56 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
8953 goto cleanup_nomem; 9284 goto cleanup_nomem;
8954 } 9285 }
8955 9286
8956 /* Enable MSI style interrupts if they are supported. */ 9287 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
8957 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) { 9288 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9289 IPR_MAX_MSIX_VECTORS);
9290 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9291 }
9292
9293 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9294 ipr_enable_msix(ioa_cfg) == 0)
9295 ioa_cfg->intr_flag = IPR_USE_MSIX;
9296 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9297 ipr_enable_msi(ioa_cfg) == 0)
9298 ioa_cfg->intr_flag = IPR_USE_MSI;
9299 else {
9300 ioa_cfg->intr_flag = IPR_USE_LSI;
9301 ioa_cfg->nvectors = 1;
9302 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9303 }
9304
9305 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9306 ioa_cfg->intr_flag == IPR_USE_MSIX) {
8958 rc = ipr_test_msi(ioa_cfg, pdev); 9307 rc = ipr_test_msi(ioa_cfg, pdev);
8959 if (rc == -EOPNOTSUPP) 9308 if (rc == -EOPNOTSUPP) {
8960 pci_disable_msi(pdev); 9309 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9310 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9311 pci_disable_msi(pdev);
9312 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9313 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9314 pci_disable_msix(pdev);
9315 }
9316
9317 ioa_cfg->intr_flag = IPR_USE_LSI;
9318 ioa_cfg->nvectors = 1;
9319 }
8961 else if (rc) 9320 else if (rc)
8962 goto out_msi_disable; 9321 goto out_msi_disable;
8963 else 9322 else {
8964 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq); 9323 if (ioa_cfg->intr_flag == IPR_USE_MSI)
8965 } else if (ipr_debug) 9324 dev_info(&pdev->dev,
8966 dev_info(&pdev->dev, "Cannot enable MSI.\n"); 9325 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9326 ioa_cfg->nvectors, pdev->irq);
9327 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9328 dev_info(&pdev->dev,
9329 "Request for %d MSIXs succeeded.\n",
9330 ioa_cfg->nvectors);
9331 }
9332 }
9333
9334 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9335 (unsigned int)num_online_cpus(),
9336 (unsigned int)IPR_MAX_HRRQ_NUM);
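Read together, the added block sizes the completion side to the interrupt side: MSI-X is tried first, then MSI, then legacy LSI with a single vector, and the number of host RRQs becomes the smallest of the vectors granted, the online CPU count, and the hard cap IPR_MAX_HRRQ_NUM (0x10, defined in ipr.h below). An adapter that came up with 2 MSI-X vectors on a 4-CPU host therefore runs with hrrq_num = min3(2, 4, 16) = 2, so no queue is left without a vector and a CPU to service it.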
8967 9337
8968 /* Save away PCI config space for use following IOA reset */ 9338 /* Save away PCI config space for use following IOA reset */
8969 rc = pci_save_state(pdev); 9339 rc = pci_save_state(pdev);
@@ -9011,10 +9381,21 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
9011 ioa_cfg->ioa_unit_checked = 1; 9381 ioa_cfg->ioa_unit_checked = 1;
9012 9382
9013 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 9383 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9014 rc = request_irq(pdev->irq, ipr_isr,
9015 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
9016 IPR_NAME, ioa_cfg);
9017 9384
9385 if (ioa_cfg->intr_flag == IPR_USE_MSI
9386 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9387 name_msi_vectors(ioa_cfg);
9388 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9389 0,
9390 ioa_cfg->vectors_info[0].desc,
9391 &ioa_cfg->hrrq[0]);
9392 if (!rc)
9393 rc = ipr_request_other_msi_irqs(ioa_cfg);
9394 } else {
9395 rc = request_irq(pdev->irq, ipr_isr,
9396 IRQF_SHARED,
9397 IPR_NAME, &ioa_cfg->hrrq[0]);
9398 }
9018 if (rc) { 9399 if (rc) {
9019 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", 9400 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9020 pdev->irq, rc); 9401 pdev->irq, rc);
@@ -9039,7 +9420,10 @@ out:
9039cleanup_nolog: 9420cleanup_nolog:
9040 ipr_free_mem(ioa_cfg); 9421 ipr_free_mem(ioa_cfg);
9041out_msi_disable: 9422out_msi_disable:
9042 pci_disable_msi(pdev); 9423 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9424 pci_disable_msi(pdev);
9425 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9426 pci_disable_msix(pdev);
9043cleanup_nomem: 9427cleanup_nomem:
9044 iounmap(ipr_regs); 9428 iounmap(ipr_regs);
9045out_release_regions: 9429out_release_regions:
@@ -9361,9 +9745,7 @@ static struct pci_driver ipr_driver = {
9361 **/ 9745 **/
9362static void ipr_halt_done(struct ipr_cmnd *ipr_cmd) 9746static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9363{ 9747{
9364 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 9748 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9365
9366 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
9367} 9749}
9368 9750
9369/** 9751/**
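The one-line body ipr_halt_done() now has shows the ownership model that runs through the patch: a command block recycles onto the free list of the HRR queue it belongs to, ipr_cmd->hrrq->hrrq_free_q, rather than onto the adapter-wide free_q that the ipr.h hunks below delete. The allocation side of the same pattern, as a sketch only (example_get_cmnd is illustrative; the driver's own helper runs under the host lock):

/* Sketch: take the first free command block of one queue and track
 * it as pending; locking is elided here. */
static struct ipr_cmnd *example_get_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd;

	if (list_empty(&hrrq->hrrq_free_q))
		return NULL;

	ipr_cmd = list_first_entry(&hrrq->hrrq_free_q,
				   struct ipr_cmnd, queue);
	list_move_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
	return ipr_cmd;
}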
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 449309a21107..325cb2dd9a90 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -303,6 +303,9 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
303 * Misc literals 303 * Misc literals
304 */ 304 */
305#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST 305#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST
306#define IPR_MAX_MSIX_VECTORS 0x5
307#define IPR_MAX_HRRQ_NUM 0x10
308#define IPR_INIT_HRRQ 0x0
306 309
307/* 310/*
308 * Adapter interface types 311 * Adapter interface types
@@ -464,9 +467,36 @@ struct ipr_supported_device {
464 u8 reserved2[16]; 467 u8 reserved2[16];
465}__attribute__((packed, aligned (4))); 468}__attribute__((packed, aligned (4)));
466 469
470struct ipr_hrr_queue {
471 struct ipr_ioa_cfg *ioa_cfg;
472 __be32 *host_rrq;
473 dma_addr_t host_rrq_dma;
474#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc
475#define IPR_HRRQ_RESP_BIT_SET 0x00000002
476#define IPR_HRRQ_TOGGLE_BIT 0x00000001
477#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
478#define IPR_ID_HRRQ_SELE_ENABLE 0x02
479 volatile __be32 *hrrq_start;
480 volatile __be32 *hrrq_end;
481 volatile __be32 *hrrq_curr;
482
483 struct list_head hrrq_free_q;
484 struct list_head hrrq_pending_q;
485
486 volatile u32 toggle_bit;
487 u32 size;
488 u32 min_cmd_id;
489 u32 max_cmd_id;
490};
491
492#define for_each_hrrq(hrrq, ioa_cfg) \
493 for (hrrq = (ioa_cfg)->hrrq; \
494 hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++)
495
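for_each_hrrq() walks the first hrrq_num slots of the fixed hrrq[IPR_MAX_HRRQ_NUM] array embedded in ipr_ioa_cfg further down, so iterating the active queues needs no allocation and no bookkeeping beyond hrrq_num itself. A sketch of a typical walk (example_fail_all_pending is illustrative, not a hunk from this patch):

/* Sketch: drain every per-vector queue with the new iterator,
 * moving pending commands back to their owning free lists. */
static void example_fail_all_pending(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd, *tmp;

	for_each_hrrq(hrrq, ioa_cfg) {
		list_for_each_entry_safe(ipr_cmd, tmp,
					 &hrrq->hrrq_pending_q, queue)
			list_move_tail(&ipr_cmd->queue,
				       &hrrq->hrrq_free_q);
	}
}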
467/* Command packet structure */ 496/* Command packet structure */
468struct ipr_cmd_pkt { 497struct ipr_cmd_pkt {
469 __be16 reserved; /* Reserved by IOA */ 498 u8 reserved; /* Reserved by IOA */
499 u8 hrrq_id;
470 u8 request_type; 500 u8 request_type;
471#define IPR_RQTYPE_SCSICDB 0x00 501#define IPR_RQTYPE_SCSICDB 0x00
472#define IPR_RQTYPE_IOACMD 0x01 502#define IPR_RQTYPE_IOACMD 0x01
@@ -1322,6 +1352,7 @@ struct ipr_chip_t {
1322 u16 intr_type; 1352 u16 intr_type;
1323#define IPR_USE_LSI 0x00 1353#define IPR_USE_LSI 0x00
1324#define IPR_USE_MSI 0x01 1354#define IPR_USE_MSI 0x01
1355#define IPR_USE_MSIX 0x02
1325 u16 sis_type; 1356 u16 sis_type;
1326#define IPR_SIS32 0x00 1357#define IPR_SIS32 0x00
1327#define IPR_SIS64 0x01 1358#define IPR_SIS64 0x01
@@ -1420,20 +1451,6 @@ struct ipr_ioa_cfg {
1420 struct ipr_trace_entry *trace; 1451 struct ipr_trace_entry *trace;
1421 u32 trace_index:IPR_NUM_TRACE_INDEX_BITS; 1452 u32 trace_index:IPR_NUM_TRACE_INDEX_BITS;
1422 1453
1423 /*
1424 * Queue for free command blocks
1425 */
1426 char ipr_free_label[8];
1427#define IPR_FREEQ_LABEL "free-q"
1428 struct list_head free_q;
1429
1430 /*
1431 * Queue for command blocks outstanding to the adapter
1432 */
1433 char ipr_pending_label[8];
1434#define IPR_PENDQ_LABEL "pend-q"
1435 struct list_head pending_q;
1436
1437 char cfg_table_start[8]; 1454 char cfg_table_start[8];
1438#define IPR_CFG_TBL_START "cfg" 1455#define IPR_CFG_TBL_START "cfg"
1439 union { 1456 union {
@@ -1457,16 +1474,9 @@ struct ipr_ioa_cfg {
1457 struct list_head hostrcb_free_q; 1474 struct list_head hostrcb_free_q;
1458 struct list_head hostrcb_pending_q; 1475 struct list_head hostrcb_pending_q;
1459 1476
1460 __be32 *host_rrq; 1477 struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
1461 dma_addr_t host_rrq_dma; 1478 u32 hrrq_num;
1462#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc 1479 u32 hrrq_index;
1463#define IPR_HRRQ_RESP_BIT_SET 0x00000002
1464#define IPR_HRRQ_TOGGLE_BIT 0x00000001
1465#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
1466 volatile __be32 *hrrq_start;
1467 volatile __be32 *hrrq_end;
1468 volatile __be32 *hrrq_curr;
1469 volatile u32 toggle_bit;
1470 1480
1471 struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES]; 1481 struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
1472 1482
@@ -1512,6 +1522,15 @@ struct ipr_ioa_cfg {
1512 u32 max_cmds; 1522 u32 max_cmds;
1513 struct ipr_cmnd **ipr_cmnd_list; 1523 struct ipr_cmnd **ipr_cmnd_list;
1514 dma_addr_t *ipr_cmnd_list_dma; 1524 dma_addr_t *ipr_cmnd_list_dma;
1525
1526 u16 intr_flag;
1527 unsigned int nvectors;
1528
1529 struct {
1530 unsigned short vec;
1531 char desc[22];
1532 } vectors_info[IPR_MAX_MSIX_VECTORS];
1533
1515}; /* struct ipr_ioa_cfg */ 1534}; /* struct ipr_ioa_cfg */
1516 1535
1517struct ipr_cmnd { 1536struct ipr_cmnd {
@@ -1549,6 +1568,7 @@ struct ipr_cmnd {
1549 struct scsi_device *sdev; 1568 struct scsi_device *sdev;
1550 } u; 1569 } u;
1551 1570
1571 struct ipr_hrr_queue *hrrq;
1552 struct ipr_ioa_cfg *ioa_cfg; 1572 struct ipr_ioa_cfg *ioa_cfg;
1553}; 1573};
1554 1574
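Taken together, the header changes give every command two routing hooks: the hrrq back-pointer added to struct ipr_cmnd above, used on the host side to find the right free list, and the hrrq_id byte carved out of the old 16-bit reserved field of the command packet, which tells the adapter which host RRQ should receive the completion. One plausible round-robin assignment, shown purely as a sketch (the driver's actual selection policy lives in hunks not quoted here, and the path to cmd_pkt through the command block's IOARCB is assumed):

/* Sketch: spread commands across the configured queues and stamp
 * the chosen queue id into the command packet; hrrq_index is u32,
 * so the post-increment wraps harmlessly. */
static struct ipr_hrr_queue *example_select_hrrq(struct ipr_ioa_cfg *ioa_cfg,
						 struct ipr_cmnd *ipr_cmd)
{
	unsigned int idx = ioa_cfg->hrrq_index++ % ioa_cfg->hrrq_num;

	ipr_cmd->hrrq = &ioa_cfg->hrrq[idx];
	ipr_cmd->ioarcb.cmd_pkt.hrrq_id = idx;
	return ipr_cmd->hrrq;
}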