Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--	drivers/scsi/ipr.c	168
1 files changed, 118 insertions, 50 deletions

diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 45e192a51005..e3f29f61cbc3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -566,6 +566,23 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 #endif
 
 /**
+ * ipr_lock_and_done - Acquire lock and complete command
+ * @ipr_cmd:	ipr command struct
+ *
+ * Return value:
+ *	none
+ **/
+static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
+{
+	unsigned long lock_flags;
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	ipr_cmd->done(ipr_cmd);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
+/**
  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
  * @ipr_cmd:	ipr command struct
  *
@@ -611,34 +628,50 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
  * Return value:
  *	none
  **/
-static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
+static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
+			      void (*fast_done) (struct ipr_cmnd *))
 {
 	ipr_reinit_ipr_cmnd(ipr_cmd);
 	ipr_cmd->u.scratch = 0;
 	ipr_cmd->sibling = NULL;
+	ipr_cmd->fast_done = fast_done;
 	init_timer(&ipr_cmd->timer);
 }
 
 /**
- * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
  * @ioa_cfg:	ioa config struct
  *
  * Return value:
  *	pointer to ipr command struct
  **/
 static
-struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct ipr_cmnd *ipr_cmd;
 
 	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
 	list_del(&ipr_cmd->queue);
-	ipr_init_ipr_cmnd(ipr_cmd);
 
 	return ipr_cmd;
 }
 
 /**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
+ * @ioa_cfg:	ioa config struct
+ *
+ * Return value:
+ *	pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+	struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
+	return ipr_cmd;
+}
+
+/**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:	ioa config struct
  * @clr_ints:     interrupts to clear
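The two hunks above split allocation from initialization so each caller can pick the completion routine the interrupt handler will invoke through the new fast_done pointer: ipr_get_free_ipr_cmnd() keeps the old semantics by installing ipr_lock_and_done(), which re-acquires the host lock before calling ->done(), while the SCSI fast path further down installs ipr_scsi_done() directly. A minimal stand-alone sketch of that dispatch pattern, with illustrative names rather than the driver's real symbols:

/*
 * Sketch only: every command block carries a completion for the lock-free
 * interrupt path (fast_done), and lock_and_done adapts legacy completions
 * that still expect the host lock to be held.
 * Build with: cc -pthread fast_done_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

struct cmd {
	void (*done)(struct cmd *);	 /* expects host_lock to be held */
	void (*fast_done)(struct cmd *); /* called with no locks held    */
};

/* Default fast_done: wrap the legacy completion in the host lock. */
static void lock_and_done(struct cmd *c)
{
	pthread_mutex_lock(&host_lock);
	c->done(c);
	pthread_mutex_unlock(&host_lock);
}

static void legacy_done(struct cmd *c)
{
	(void)c;
	printf("legacy completion, lock held by caller\n");
}

static void lockless_done(struct cmd *c)
{
	(void)c;
	printf("fast completion, takes locks only where it must\n");
}

int main(void)
{
	struct cmd internal = { .done = legacy_done, .fast_done = lock_and_done };
	struct cmd fast_io  = { .done = legacy_done, .fast_done = lockless_done };

	/* The interrupt path always calls fast_done, never done directly. */
	internal.fast_done(&internal);
	fast_io.fast_done(&fast_io);
	return 0;
}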
@@ -5116,8 +5149,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	u16 cmd_index;
 	int num_hrrq = 0;
 	int irq_none = 0;
-	struct ipr_cmnd *ipr_cmd;
+	struct ipr_cmnd *ipr_cmd, *temp;
 	irqreturn_t rc = IRQ_NONE;
+	LIST_HEAD(doneq);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
@@ -5138,8 +5172,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
 		if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
 			ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
-			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-			return IRQ_HANDLED;
+			rc = IRQ_HANDLED;
+			goto unlock_out;
 		}
 
 		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
@@ -5148,9 +5182,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
 
-		list_del(&ipr_cmd->queue);
-		del_timer(&ipr_cmd->timer);
-		ipr_cmd->done(ipr_cmd);
+		list_move_tail(&ipr_cmd->queue, &doneq);
 
 		rc = IRQ_HANDLED;
 
@@ -5180,8 +5212,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
 			   int_reg & IPR_PCII_HRRQ_UPDATED) {
 			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
-			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-			return IRQ_HANDLED;
+			rc = IRQ_HANDLED;
+			goto unlock_out;
 		} else
 			break;
 	}
@@ -5189,7 +5221,14 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	if (unlikely(rc == IRQ_NONE))
 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
 
+unlock_out:
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+		list_del(&ipr_cmd->queue);
+		del_timer(&ipr_cmd->timer);
+		ipr_cmd->fast_done(ipr_cmd);
+	}
+
 	return rc;
 }
 
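With the doneq above, the interrupt handler only moves finished commands off the host response queue while the host lock is held; the timers are deleted and the fast_done completions run after the lock has been dropped. A stand-alone sketch of that two-phase drain, using a simple singly linked list and a pthread mutex in place of the kernel list and spinlock (names are illustrative, not the driver's):

/*
 * Sketch only: harvest finished commands onto a private doneq under the
 * lock, then drop the lock and run each fast_done.
 * Build with: cc -pthread doneq_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct cmd {
	struct cmd *next;
	void (*fast_done)(struct cmd *);
};

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *pending;	/* guarded by host_lock */

static void print_done(struct cmd *c)
{
	printf("completing %p with host_lock dropped\n", (void *)c);
}

static void isr(void)
{
	struct cmd *doneq = NULL, *c;

	/* Phase 1: move finished commands to a local list under the lock. */
	pthread_mutex_lock(&host_lock);
	while ((c = pending) != NULL) {
		pending = c->next;
		c->next = doneq;
		doneq = c;
	}
	pthread_mutex_unlock(&host_lock);

	/* Phase 2: run the (possibly slow) completions without the lock. */
	while ((c = doneq) != NULL) {
		doneq = c->next;
		c->fast_done(c);
	}
}

int main(void)
{
	struct cmd a = { NULL, print_done };
	struct cmd b = { &a, print_done };

	pthread_mutex_lock(&host_lock);
	pending = &b;		/* pretend both commands just completed */
	pthread_mutex_unlock(&host_lock);

	isr();
	return 0;
}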
@@ -5770,21 +5809,28 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+	unsigned long lock_flags;
 
 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
-		scsi_dma_unmap(ipr_cmd->scsi_cmd);
+		scsi_dma_unmap(scsi_cmd);
+
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
-	} else
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	} else {
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 		ipr_erp_start(ioa_cfg, ipr_cmd);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	}
 }
 
 /**
  * ipr_queuecommand - Queue a mid-layer request
+ * @shost:		scsi host struct
  * @scsi_cmd:	scsi command struct
- * @done:		done function
  *
  * This function queues a request generated by the mid-layer.
  *
@@ -5793,61 +5839,61 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
  **/
-static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
-				void (*done) (struct scsi_cmnd *))
+static int ipr_queuecommand(struct Scsi_Host *shost,
+			    struct scsi_cmnd *scsi_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg;
 	struct ipr_resource_entry *res;
 	struct ipr_ioarcb *ioarcb;
 	struct ipr_cmnd *ipr_cmd;
-	int rc = 0;
+	unsigned long lock_flags;
+	int rc;
 
-	scsi_cmd->scsi_done = done;
-	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-	res = scsi_cmd->device->hostdata;
+	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+
+	spin_lock_irqsave(shost->host_lock, lock_flags);
 	scsi_cmd->result = (DID_OK << 16);
+	res = scsi_cmd->device->hostdata;
 
 	/*
 	 * We are currently blocking all devices due to a host reset
 	 * We have told the host to stop giving us new requests, but
 	 * ERP ops don't count. FIXME
 	 */
-	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
+	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
+	}
 
 	/*
 	 * FIXME - Create scsi_set_host_offline interface
 	 *  and the ioa_is_dead check can be removed
 	 */
 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
-		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-		scsi_cmd->result = (DID_NO_CONNECT << 16);
-		scsi_cmd->scsi_done(scsi_cmd);
-		return 0;
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		goto err_nodev;
+	}
+
+	if (ipr_is_gata(res) && res->sata_port) {
+		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		return rc;
 	}
 
-	if (ipr_is_gata(res) && res->sata_port)
-		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+	ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 
-	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
 	ioarcb = &ipr_cmd->ioarcb;
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
 
 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
 	ipr_cmd->scsi_cmd = scsi_cmd;
-	ioarcb->res_handle = res->res_handle;
-	ipr_cmd->done = ipr_scsi_done;
-	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
+	ipr_cmd->done = ipr_scsi_eh_done;
 
 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
 		if (scsi_cmd->underflow == 0)
 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 
-		if (res->needs_sync_complete) {
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
-			res->needs_sync_complete = 0;
-		}
-
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
 		if (ipr_is_gscsi(res))
 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
@@ -5859,23 +5905,46 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 
-	if (likely(rc == 0)) {
-		if (ioa_cfg->sis64)
-			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
-		else
-			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
-	}
+	if (ioa_cfg->sis64)
+		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
+	else
+		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 
-	if (unlikely(rc != 0)) {
-		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
+		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		if (!rc)
+			scsi_dma_unmap(scsi_cmd);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
+	if (unlikely(ioa_cfg->ioa_is_dead)) {
+		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		scsi_dma_unmap(scsi_cmd);
+		goto err_nodev;
+	}
+
+	ioarcb->res_handle = res->res_handle;
+	if (res->needs_sync_complete) {
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+		res->needs_sync_complete = 0;
+	}
+	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 	ipr_send_command(ipr_cmd);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 	return 0;
-}
 
-static DEF_SCSI_QCMD(ipr_queuecommand)
+err_nodev:
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+	scsi_cmd->result = (DID_NO_CONNECT << 16);
+	scsi_cmd->scsi_done(scsi_cmd);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+	return 0;
+}
 
 /**
  * ipr_ioctl - IOCTL handler
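The converted ipr_queuecommand() above is no longer wrapped by DEF_SCSI_QCMD, so the midlayer calls it without the host lock held. It takes the lock only to validate state and claim a command block, drops it for the scatter/gather mapping, then re-takes it and re-checks allow_cmds/ioa_is_dead before committing the request or unwinding to the free list. A compact sketch of that claim / prepare / recheck-and-commit shape, with illustrative names only:

/*
 * Sketch only: claim a block under the lock, prepare unlocked, then
 * revalidate and commit (or give the block back) under the lock again.
 * Build with: cc -pthread queuecmd_sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MLQUEUE_HOST_BUSY 1

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static bool allow_cmds = true;	/* guarded by host_lock */
static int free_blocks = 4;	/* guarded by host_lock */

static int queuecommand(void)
{
	int rc = 0;

	pthread_mutex_lock(&host_lock);
	if (!allow_cmds || free_blocks == 0) {
		pthread_mutex_unlock(&host_lock);
		return MLQUEUE_HOST_BUSY;
	}
	free_blocks--;			/* claim a command block */
	pthread_mutex_unlock(&host_lock);

	/* Expensive preparation (DMA mapping in the driver) runs unlocked. */

	pthread_mutex_lock(&host_lock);
	if (rc || !allow_cmds) {
		free_blocks++;		/* state changed: give the block back */
		pthread_mutex_unlock(&host_lock);
		return MLQUEUE_HOST_BUSY;
	}
	/* Commit: queue on the pending list and kick the hardware. */
	pthread_mutex_unlock(&host_lock);
	printf("command issued\n");
	return 0;
}

int main(void)
{
	return queuecommand();
}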
@@ -8775,8 +8844,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
-	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
-		      sata_port_info.flags, &ipr_sata_ops);
+	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
 
 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
 