diff options
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r-- | drivers/scsi/ipr.c | 158 |
1 file changed, 110 insertions(+), 48 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0621238fac4a..12868ca46110 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -60,6 +60,7 @@ | |||
60 | #include <linux/errno.h> | 60 | #include <linux/errno.h> |
61 | #include <linux/kernel.h> | 61 | #include <linux/kernel.h> |
62 | #include <linux/slab.h> | 62 | #include <linux/slab.h> |
63 | #include <linux/vmalloc.h> | ||
63 | #include <linux/ioport.h> | 64 | #include <linux/ioport.h> |
64 | #include <linux/delay.h> | 65 | #include <linux/delay.h> |
65 | #include <linux/pci.h> | 66 | #include <linux/pci.h> |
@@ -2717,13 +2718,18 @@ static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, | |||
2717 | unsigned long pci_address, u32 length) | 2718 | unsigned long pci_address, u32 length) |
2718 | { | 2719 | { |
2719 | int bytes_copied = 0; | 2720 | int bytes_copied = 0; |
2720 | int cur_len, rc, rem_len, rem_page_len; | 2721 | int cur_len, rc, rem_len, rem_page_len, max_dump_size; |
2721 | __be32 *page; | 2722 | __be32 *page; |
2722 | unsigned long lock_flags = 0; | 2723 | unsigned long lock_flags = 0; |
2723 | struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; | 2724 | struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; |
2724 | 2725 | ||
2726 | if (ioa_cfg->sis64) | ||
2727 | max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; | ||
2728 | else | ||
2729 | max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; | ||
2730 | |||
2725 | while (bytes_copied < length && | 2731 | while (bytes_copied < length && |
2726 | (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) { | 2732 | (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { |
2727 | if (ioa_dump->page_offset >= PAGE_SIZE || | 2733 | if (ioa_dump->page_offset >= PAGE_SIZE || |
2728 | ioa_dump->page_offset == 0) { | 2734 | ioa_dump->page_offset == 0) { |
2729 | page = (__be32 *)__get_free_page(GFP_ATOMIC); | 2735 | page = (__be32 *)__get_free_page(GFP_ATOMIC); |
@@ -2885,8 +2891,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) | |||
2885 | unsigned long lock_flags = 0; | 2891 | unsigned long lock_flags = 0; |
2886 | struct ipr_driver_dump *driver_dump = &dump->driver_dump; | 2892 | struct ipr_driver_dump *driver_dump = &dump->driver_dump; |
2887 | struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; | 2893 | struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; |
2888 | u32 num_entries, start_off, end_off; | 2894 | u32 num_entries, max_num_entries, start_off, end_off; |
2889 | u32 bytes_to_copy, bytes_copied, rc; | 2895 | u32 max_dump_size, bytes_to_copy, bytes_copied, rc; |
2890 | struct ipr_sdt *sdt; | 2896 | struct ipr_sdt *sdt; |
2891 | int valid = 1; | 2897 | int valid = 1; |
2892 | int i; | 2898 | int i; |
@@ -2947,8 +2953,18 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) | |||
2947 | on entries in this table */ | 2953 | on entries in this table */ |
2948 | sdt = &ioa_dump->sdt; | 2954 | sdt = &ioa_dump->sdt; |
2949 | 2955 | ||
2956 | if (ioa_cfg->sis64) { | ||
2957 | max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES; | ||
2958 | max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; | ||
2959 | } else { | ||
2960 | max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES; | ||
2961 | max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; | ||
2962 | } | ||
2963 | |||
2964 | bytes_to_copy = offsetof(struct ipr_sdt, entry) + | ||
2965 | (max_num_entries * sizeof(struct ipr_sdt_entry)); | ||
2950 | rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, | 2966 | rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, |
2951 | sizeof(struct ipr_sdt) / sizeof(__be32)); | 2967 | bytes_to_copy / sizeof(__be32)); |
2952 | 2968 | ||
2953 | /* Smart Dump table is ready to use and the first entry is valid */ | 2969 | /* Smart Dump table is ready to use and the first entry is valid */ |
2954 | if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && | 2970 | if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && |
@@ -2964,13 +2980,20 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) | |||
2964 | 2980 | ||
2965 | num_entries = be32_to_cpu(sdt->hdr.num_entries_used); | 2981 | num_entries = be32_to_cpu(sdt->hdr.num_entries_used); |
2966 | 2982 | ||
2967 | if (num_entries > IPR_NUM_SDT_ENTRIES) | 2983 | if (num_entries > max_num_entries) |
2968 | num_entries = IPR_NUM_SDT_ENTRIES; | 2984 | num_entries = max_num_entries; |
2985 | |||
2986 | /* Update dump length to the actual data to be copied */ | ||
2987 | dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); | ||
2988 | if (ioa_cfg->sis64) | ||
2989 | dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); | ||
2990 | else | ||
2991 | dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); | ||
2969 | 2992 | ||
2970 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 2993 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
2971 | 2994 | ||
2972 | for (i = 0; i < num_entries; i++) { | 2995 | for (i = 0; i < num_entries; i++) { |
2973 | if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) { | 2996 | if (ioa_dump->hdr.len > max_dump_size) { |
2974 | driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; | 2997 | driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; |
2975 | break; | 2998 | break; |
2976 | } | 2999 | } |
@@ -2989,7 +3012,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) | |||
2989 | valid = 0; | 3012 | valid = 0; |
2990 | } | 3013 | } |
2991 | if (valid) { | 3014 | if (valid) { |
2992 | if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) { | 3015 | if (bytes_to_copy > max_dump_size) { |
2993 | sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; | 3016 | sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; |
2994 | continue; | 3017 | continue; |
2995 | } | 3018 | } |
@@ -3044,6 +3067,7 @@ static void ipr_release_dump(struct kref *kref) | |||
3044 | for (i = 0; i < dump->ioa_dump.next_page_index; i++) | 3067 | for (i = 0; i < dump->ioa_dump.next_page_index; i++) |
3045 | free_page((unsigned long) dump->ioa_dump.ioa_data[i]); | 3068 | free_page((unsigned long) dump->ioa_dump.ioa_data[i]); |
3046 | 3069 | ||
3070 | vfree(dump->ioa_dump.ioa_data); | ||
3047 | kfree(dump); | 3071 | kfree(dump); |
3048 | LEAVE; | 3072 | LEAVE; |
3049 | } | 3073 | } |
@@ -3835,7 +3859,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, | |||
3835 | struct ipr_dump *dump; | 3859 | struct ipr_dump *dump; |
3836 | unsigned long lock_flags = 0; | 3860 | unsigned long lock_flags = 0; |
3837 | char *src; | 3861 | char *src; |
3838 | int len; | 3862 | int len, sdt_end; |
3839 | size_t rc = count; | 3863 | size_t rc = count; |
3840 | 3864 | ||
3841 | if (!capable(CAP_SYS_ADMIN)) | 3865 | if (!capable(CAP_SYS_ADMIN)) |
@@ -3875,9 +3899,17 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, | |||
3875 | 3899 | ||
3876 | off -= sizeof(dump->driver_dump); | 3900 | off -= sizeof(dump->driver_dump); |
3877 | 3901 | ||
3878 | if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) { | 3902 | if (ioa_cfg->sis64) |
3879 | if (off + count > offsetof(struct ipr_ioa_dump, ioa_data)) | 3903 | sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + |
3880 | len = offsetof(struct ipr_ioa_dump, ioa_data) - off; | 3904 | (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * |
3905 | sizeof(struct ipr_sdt_entry)); | ||
3906 | else | ||
3907 | sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + | ||
3908 | (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry)); | ||
3909 | |||
3910 | if (count && off < sdt_end) { | ||
3911 | if (off + count > sdt_end) | ||
3912 | len = sdt_end - off; | ||
3881 | else | 3913 | else |
3882 | len = count; | 3914 | len = count; |
3883 | src = (u8 *)&dump->ioa_dump + off; | 3915 | src = (u8 *)&dump->ioa_dump + off; |
@@ -3887,7 +3919,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, | |||
3887 | count -= len; | 3919 | count -= len; |
3888 | } | 3920 | } |
3889 | 3921 | ||
3890 | off -= offsetof(struct ipr_ioa_dump, ioa_data); | 3922 | off -= sdt_end; |
3891 | 3923 | ||
3892 | while (count) { | 3924 | while (count) { |
3893 | if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) | 3925 | if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) |
@@ -3916,6 +3948,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, | |||
3916 | static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) | 3948 | static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) |
3917 | { | 3949 | { |
3918 | struct ipr_dump *dump; | 3950 | struct ipr_dump *dump; |
3951 | __be32 **ioa_data; | ||
3919 | unsigned long lock_flags = 0; | 3952 | unsigned long lock_flags = 0; |
3920 | 3953 | ||
3921 | dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); | 3954 | dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); |
@@ -3925,6 +3958,19 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) | |||
3925 | return -ENOMEM; | 3958 | return -ENOMEM; |
3926 | } | 3959 | } |
3927 | 3960 | ||
3961 | if (ioa_cfg->sis64) | ||
3962 | ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *)); | ||
3963 | else | ||
3964 | ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *)); | ||
3965 | |||
3966 | if (!ioa_data) { | ||
3967 | ipr_err("Dump memory allocation failed\n"); | ||
3968 | kfree(dump); | ||
3969 | return -ENOMEM; | ||
3970 | } | ||
3971 | |||
3972 | dump->ioa_dump.ioa_data = ioa_data; | ||
3973 | |||
3928 | kref_init(&dump->kref); | 3974 | kref_init(&dump->kref); |
3929 | dump->ioa_cfg = ioa_cfg; | 3975 | dump->ioa_cfg = ioa_cfg; |
3930 | 3976 | ||
@@ -3932,6 +3978,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) | |||
3932 | 3978 | ||
3933 | if (INACTIVE != ioa_cfg->sdt_state) { | 3979 | if (INACTIVE != ioa_cfg->sdt_state) { |
3934 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 3980 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
3981 | vfree(dump->ioa_dump.ioa_data); | ||
3935 | kfree(dump); | 3982 | kfree(dump); |
3936 | return 0; | 3983 | return 0; |
3937 | } | 3984 | } |
@@ -4953,9 +5000,35 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) | |||
4953 | * IRQ_NONE / IRQ_HANDLED | 5000 | * IRQ_NONE / IRQ_HANDLED |
4954 | **/ | 5001 | **/ |
4955 | static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, | 5002 | static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, |
4956 | volatile u32 int_reg) | 5003 | u32 int_reg) |
4957 | { | 5004 | { |
4958 | irqreturn_t rc = IRQ_HANDLED; | 5005 | irqreturn_t rc = IRQ_HANDLED; |
5006 | u32 int_mask_reg; | ||
5007 | |||
5008 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); | ||
5009 | int_reg &= ~int_mask_reg; | ||
5010 | |||
5011 | /* If an interrupt on the adapter did not occur, ignore it. | ||
5012 | * Or in the case of SIS 64, check for a stage change interrupt. | ||
5013 | */ | ||
5014 | if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { | ||
5015 | if (ioa_cfg->sis64) { | ||
5016 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | ||
5017 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
5018 | if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { | ||
5019 | |||
5020 | /* clear stage change */ | ||
5021 | writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); | ||
5022 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
5023 | list_del(&ioa_cfg->reset_cmd->queue); | ||
5024 | del_timer(&ioa_cfg->reset_cmd->timer); | ||
5025 | ipr_reset_ioa_job(ioa_cfg->reset_cmd); | ||
5026 | return IRQ_HANDLED; | ||
5027 | } | ||
5028 | } | ||
5029 | |||
5030 | return IRQ_NONE; | ||
5031 | } | ||
4959 | 5032 | ||
4960 | if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { | 5033 | if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { |
4961 | /* Mask the interrupt */ | 5034 | /* Mask the interrupt */ |
@@ -4968,6 +5041,13 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, | |||
4968 | list_del(&ioa_cfg->reset_cmd->queue); | 5041 | list_del(&ioa_cfg->reset_cmd->queue); |
4969 | del_timer(&ioa_cfg->reset_cmd->timer); | 5042 | del_timer(&ioa_cfg->reset_cmd->timer); |
4970 | ipr_reset_ioa_job(ioa_cfg->reset_cmd); | 5043 | ipr_reset_ioa_job(ioa_cfg->reset_cmd); |
5044 | } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { | ||
5045 | if (ipr_debug && printk_ratelimit()) | ||
5046 | dev_err(&ioa_cfg->pdev->dev, | ||
5047 | "Spurious interrupt detected. 0x%08X\n", int_reg); | ||
5048 | writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); | ||
5049 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); | ||
5050 | return IRQ_NONE; | ||
4971 | } else { | 5051 | } else { |
4972 | if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) | 5052 | if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) |
4973 | ioa_cfg->ioa_unit_checked = 1; | 5053 | ioa_cfg->ioa_unit_checked = 1; |
@@ -5016,10 +5096,11 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
5016 | { | 5096 | { |
5017 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; | 5097 | struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; |
5018 | unsigned long lock_flags = 0; | 5098 | unsigned long lock_flags = 0; |
5019 | volatile u32 int_reg, int_mask_reg; | 5099 | u32 int_reg = 0; |
5020 | u32 ioasc; | 5100 | u32 ioasc; |
5021 | u16 cmd_index; | 5101 | u16 cmd_index; |
5022 | int num_hrrq = 0; | 5102 | int num_hrrq = 0; |
5103 | int irq_none = 0; | ||
5023 | struct ipr_cmnd *ipr_cmd; | 5104 | struct ipr_cmnd *ipr_cmd; |
5024 | irqreturn_t rc = IRQ_NONE; | 5105 | irqreturn_t rc = IRQ_NONE; |
5025 | 5106 | ||
@@ -5031,33 +5112,6 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
5031 | return IRQ_NONE; | 5112 | return IRQ_NONE; |
5032 | } | 5113 | } |
5033 | 5114 | ||
5034 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); | ||
5035 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; | ||
5036 | |||
5037 | /* If an interrupt on the adapter did not occur, ignore it. | ||
5038 | * Or in the case of SIS 64, check for a stage change interrupt. | ||
5039 | */ | ||
5040 | if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { | ||
5041 | if (ioa_cfg->sis64) { | ||
5042 | int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | ||
5043 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
5044 | if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { | ||
5045 | |||
5046 | /* clear stage change */ | ||
5047 | writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); | ||
5048 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; | ||
5049 | list_del(&ioa_cfg->reset_cmd->queue); | ||
5050 | del_timer(&ioa_cfg->reset_cmd->timer); | ||
5051 | ipr_reset_ioa_job(ioa_cfg->reset_cmd); | ||
5052 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
5053 | return IRQ_HANDLED; | ||
5054 | } | ||
5055 | } | ||
5056 | |||
5057 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
5058 | return IRQ_NONE; | ||
5059 | } | ||
5060 | |||
5061 | while (1) { | 5115 | while (1) { |
5062 | ipr_cmd = NULL; | 5116 | ipr_cmd = NULL; |
5063 | 5117 | ||
@@ -5097,7 +5151,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
5097 | /* Clear the PCI interrupt */ | 5151 | /* Clear the PCI interrupt */ |
5098 | do { | 5152 | do { |
5099 | writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); | 5153 | writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); |
5100 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; | 5154 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); |
5101 | } while (int_reg & IPR_PCII_HRRQ_UPDATED && | 5155 | } while (int_reg & IPR_PCII_HRRQ_UPDATED && |
5102 | num_hrrq++ < IPR_MAX_HRRQ_RETRIES); | 5156 | num_hrrq++ < IPR_MAX_HRRQ_RETRIES); |
5103 | 5157 | ||
@@ -5107,6 +5161,9 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
5107 | return IRQ_HANDLED; | 5161 | return IRQ_HANDLED; |
5108 | } | 5162 | } |
5109 | 5163 | ||
5164 | } else if (rc == IRQ_NONE && irq_none == 0) { | ||
5165 | int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); | ||
5166 | irq_none++; | ||
5110 | } else | 5167 | } else |
5111 | break; | 5168 | break; |
5112 | } | 5169 | } |
@@ -5143,7 +5200,8 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, | |||
5143 | 5200 | ||
5144 | nseg = scsi_dma_map(scsi_cmd); | 5201 | nseg = scsi_dma_map(scsi_cmd); |
5145 | if (nseg < 0) { | 5202 | if (nseg < 0) { |
5146 | dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); | 5203 | if (printk_ratelimit()) |
5204 | dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); | ||
5147 | return -1; | 5205 | return -1; |
5148 | } | 5206 | } |
5149 | 5207 | ||
@@ -5773,7 +5831,8 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd, | |||
5773 | } | 5831 | } |
5774 | 5832 | ||
5775 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; | 5833 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; |
5776 | ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; | 5834 | if (ipr_is_gscsi(res)) |
5835 | ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; | ||
5777 | ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; | 5836 | ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; |
5778 | ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); | 5837 | ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); |
5779 | } | 5838 | } |
@@ -7516,7 +7575,7 @@ static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd) | |||
7516 | static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) | 7575 | static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) |
7517 | { | 7576 | { |
7518 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 7577 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
7519 | volatile u32 int_reg; | 7578 | u32 int_reg; |
7520 | 7579 | ||
7521 | ENTER; | 7580 | ENTER; |
7522 | ioa_cfg->pdev->state_saved = true; | 7581 | ioa_cfg->pdev->state_saved = true; |
@@ -7555,7 +7614,10 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) | |||
7555 | ipr_cmd->job_step = ipr_reset_enable_ioa; | 7614 | ipr_cmd->job_step = ipr_reset_enable_ioa; |
7556 | 7615 | ||
7557 | if (GET_DUMP == ioa_cfg->sdt_state) { | 7616 | if (GET_DUMP == ioa_cfg->sdt_state) { |
7558 | ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT); | 7617 | if (ioa_cfg->sis64) |
7618 | ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT); | ||
7619 | else | ||
7620 | ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT); | ||
7559 | ipr_cmd->job_step = ipr_reset_wait_for_dump; | 7621 | ipr_cmd->job_step = ipr_reset_wait_for_dump; |
7560 | schedule_work(&ioa_cfg->work_q); | 7622 | schedule_work(&ioa_cfg->work_q); |
7561 | return IPR_RC_JOB_RETURN; | 7623 | return IPR_RC_JOB_RETURN; |