Diffstat (limited to 'drivers/scsi/ipr.c')
 -rw-r--r--  drivers/scsi/ipr.c  |  349
 1 file changed, 324 insertions(+), 25 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2c7b77e833f9..4baa79e68679 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -92,6 +92,7 @@ static unsigned int ipr_fastfail = 0;
 static unsigned int ipr_transop_timeout = 0;
 static unsigned int ipr_enable_cache = 1;
 static unsigned int ipr_debug = 0;
+static unsigned int ipr_dual_ioa_raid = 1;
 static DEFINE_SPINLOCK(ipr_driver_lock);
 
 /* This table describes the differences between DMA controller chips */
@@ -158,6 +159,8 @@ module_param_named(enable_cache, ipr_enable_cache, int, 0);
 MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
 module_param_named(debug, ipr_debug, int, 0);
 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
+module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
+MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IPR_DRIVER_VERSION);
 
@@ -206,6 +209,8 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"8009: Impending cache battery pack failure"},
 	{0x02040400, 0, 0,
 	"34FF: Disk device format in progress"},
+	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
+	"9070: IOA requested reset"},
 	{0x023F0000, 0, 0,
 	"Synchronization required"},
 	{0x024E0000, 0, 0,
@@ -951,6 +956,53 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
+ * strip_and_pad_whitespace - Strip and pad trailing whitespace.
+ * @i:		index into buffer
+ * @buf:		string to modify
+ *
+ * This function will strip all trailing whitespace, pad the end
+ * of the string with a single space, and NULL terminate the string.
+ *
+ * Return value:
+ *	new length of string
+ **/
+static int strip_and_pad_whitespace(int i, char *buf)
+{
+	while (i && buf[i] == ' ')
+		i--;
+	buf[i+1] = ' ';
+	buf[i+2] = '\0';
+	return i + 2;
+}
+
+/**
+ * ipr_log_vpd_compact - Log the passed extended VPD compactly.
+ * @prefix:		string to print at start of printk
+ * @hostrcb:	hostrcb pointer
+ * @vpd:		vendor/product id/sn struct
+ *
+ * Return value:
+ *	none
+ **/
+static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
+				struct ipr_vpd *vpd)
+{
+	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
+	int i = 0;
+
+	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
+	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
+
+	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
+	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
+
+	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
+	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
+
+	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
+}
+
+/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
@@ -1287,10 +1356,11 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
 
 	error = &hostrcb->hcam.u.error.u.type_17_error;
 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+	strstrip(error->failure_reason);
 
-	ipr_err("%s\n", error->failure_reason);
-	ipr_err("Remote Adapter VPD:\n");
-	ipr_log_ext_vpd(&error->vpd);
+	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
+		     be32_to_cpu(hostrcb->hcam.u.error.prc));
+	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
 	ipr_log_hex_data(ioa_cfg, error->data,
 			 be32_to_cpu(hostrcb->hcam.length) -
 			 (offsetof(struct ipr_hostrcb_error, u) +
@@ -1312,10 +1382,11 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
 
 	error = &hostrcb->hcam.u.error.u.type_07_error;
 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+	strstrip(error->failure_reason);
 
-	ipr_err("%s\n", error->failure_reason);
-	ipr_err("Remote Adapter VPD:\n");
-	ipr_log_vpd(&error->vpd);
+	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
+		     be32_to_cpu(hostrcb->hcam.u.error.prc));
+	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
 	ipr_log_hex_data(ioa_cfg, error->data,
 			 be32_to_cpu(hostrcb->hcam.length) -
 			 (offsetof(struct ipr_hostrcb_error, u) +
@@ -1672,12 +1743,15 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
 
 	list_del(&hostrcb->queue);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 
 	if (!ioasc) {
 		ipr_handle_log_data(ioa_cfg, hostrcb);
+		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
+			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
 	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
 		dev_err(&ioa_cfg->pdev->dev,
 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
@@ -2635,8 +2709,13 @@ static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	while(ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	}
+
 	ioa_cfg->errors_logged = 0;
 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 
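The loop just added to ipr_store_diagnostics() is an idiom this patch repeats in ipr_update_ioa_ucode(), __ipr_remove() and ipr_shutdown() further down: wait_event() cannot sleep while the host spinlock is held, so the lock is dropped, the reset/reload is waited out, and the lock is re-taken before in_reset_reload is re-checked. A hypothetical helper (not part of the patch) capturing the pattern would look roughly like this:

/* Hypothetical helper, not in the driver: enter with the host lock held;
 * returns with it held again and in_reset_reload clear. */
static void ipr_wait_reset_reload_locked(struct ipr_ioa_cfg *ioa_cfg,
					 unsigned long *lock_flags)
{
	while (ioa_cfg->in_reset_reload) {
		/* can't sleep in wait_event() while holding a spinlock */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, *lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, *lock_flags);
	}
}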
@@ -2958,6 +3037,11 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
 	unsigned long lock_flags;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	while(ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	}
 
 	if (ioa_cfg->ucode_sglist) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4656,18 +4740,19 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
 
 	if (!res) {
 		ipr_scsi_eh_done(ipr_cmd);
 		return;
 	}
 
-	if (!ipr_is_gscsi(res))
+	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
 		ipr_gen_sense(ipr_cmd);
 
 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
 
-	switch (ioasc & IPR_IOASC_IOASC_MASK) {
+	switch (masked_ioasc) {
 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
 		if (ipr_is_naca_model(res))
 			scsi_cmd->result |= (DID_ABORT << 16);
@@ -5363,6 +5448,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
 	}
 
+	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
 
 	ioa_cfg->reset_retries = 0;
@@ -5799,6 +5885,94 @@ static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
+ * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
+ * @ipr_cmd:	ipr command struct
+ *
+ * This function enables dual IOA RAID support if possible.
+ *
+ * Return value:
+ *	IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
+	struct ipr_mode_page24 *mode_page;
+	int length;
+
+	ENTER;
+	mode_page = ipr_get_mode_page(mode_pages, 0x24,
+				      sizeof(struct ipr_mode_page24));
+
+	if (mode_page)
+		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
+
+	length = mode_pages->hdr.length + 1;
+	mode_pages->hdr.length = 0;
+
+	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
+			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
+			      length);
+
+	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
+	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+	LEAVE;
+	return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
+ * @ipr_cmd:	ipr command struct
+ *
+ * This function handles the failure of a Mode Sense to the IOAFP.
+ * Some adapters do not handle all mode pages.
+ *
+ * Return value:
+ *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
+{
+	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
+		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
+		return IPR_RC_JOB_CONTINUE;
+	}
+
+	return ipr_reset_cmd_failed(ipr_cmd);
+}
+
+/**
+ * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
+ * @ipr_cmd:	ipr command struct
+ *
+ * This function send a mode sense to the IOA to retrieve
+ * the IOA Advanced Function Control mode page.
+ *
+ * Return value:
+ *	IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+	ENTER;
+	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
+			     0x24, ioa_cfg->vpd_cbs_dma +
+			     offsetof(struct ipr_misc_cbs, mode_pages),
+			     sizeof(struct ipr_mode_pages));
+
+	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
+	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
+
+	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+	LEAVE;
+	return IPR_RC_JOB_RETURN;
+}
+
+/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
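The three functions above slot into the driver's reset job-step chain: page 0x24 is sensed, IPR_ENABLE_DUAL_IOA_AF is set and selected back, and the chain then falls through to the existing page 0x28 handling; if the adapter rejects the page 0x24 sense, ipr_reset_mode_sense_page24_failed() skips straight to page 0x28. Below is a much-simplified, synchronous sketch of that chaining, with illustrative names; it is not the driver's actual ipr_reset_ioa_job() engine, which re-enters the chain from command completion:

#include <stdio.h>

struct job {
	int (*step)(struct job *);	/* analogous to ipr_cmd->job_step */
};

static int sense_page28(struct job *j)
{
	printf("MODE SENSE page 0x28\n");
	j->step = NULL;			/* end of the illustrated chain */
	return 0;
}

static int select_page24(struct job *j)
{
	printf("MODE SELECT page 0x24 (IPR_ENABLE_DUAL_IOA_AF set)\n");
	j->step = sense_page28;		/* as in ipr_ioafp_mode_select_page24() */
	return 0;
}

static int sense_page24(struct job *j)
{
	printf("MODE SENSE page 0x24\n");
	j->step = select_page24;	/* as in ipr_ioafp_mode_sense_page24() */
	return 0;
}

int main(void)
{
	struct job j = { .step = sense_page24 };

	while (j.step)			/* stand-in for the async completion path */
		j.step(&j);
	return 0;
}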
@@ -5866,7 +6040,10 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
 		}
 	}
 
-	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
+	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
+		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
+	else
+		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
 
 	LEAVE;
 	return IPR_RC_JOB_CONTINUE;
@@ -5888,8 +6065,11 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
+	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
 
 	ENTER;
+	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
+		ioa_cfg->dual_raid = 1;
 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
 		 ucode_vpd->major_release, ucode_vpd->card_type,
 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
@@ -5973,6 +6153,37 @@ static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
 }
 
 /**
+ * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
+ * @ipr_cmd:	ipr command struct
+ *
+ * This function sends a Page 0xD0 inquiry to the adapter
+ * to retrieve adapter capabilities.
+ *
+ * Return value:
+ *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
+	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
+
+	ENTER;
+	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
+	memset(cap, 0, sizeof(*cap));
+
+	if (ipr_inquiry_page_supported(page0, 0xD0)) {
+		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
+				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
+				  sizeof(struct ipr_inquiry_cap));
+		return IPR_RC_JOB_RETURN;
+	}
+
+	LEAVE;
+	return IPR_RC_JOB_CONTINUE;
+}
+
+/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
@@ -5992,7 +6203,7 @@ static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
 	if (!ipr_inquiry_page_supported(page0, 1))
 		ioa_cfg->cache_state = CACHE_NONE;
 
-	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
+	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
 
 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
@@ -6278,6 +6489,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
 	struct ipr_hostrcb *hostrcb;
 	struct ipr_uc_sdt sdt;
 	int rc, length;
+	u32 ioasc;
 
 	mailbox = readl(ioa_cfg->ioa_mailbox);
 
@@ -6310,9 +6522,13 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
 					(__be32 *)&hostrcb->hcam,
 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
 
-	if (!rc)
+	if (!rc) {
 		ipr_handle_log_data(ioa_cfg, hostrcb);
-	else
+		ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
+		    ioa_cfg->sdt_state == GET_DUMP)
+			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+	} else
 		ipr_unit_check_no_data(ioa_cfg);
 
 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
@@ -6425,6 +6641,48 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
+ * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
+ * @ipr_cmd:	ipr command struct
+ *
+ * Description: This clears PCI reset to the adapter and delays two seconds.
+ *
+ * Return value:
+ *	IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
+{
+	ENTER;
+	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
+	ipr_cmd->job_step = ipr_reset_bist_done;
+	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
+	LEAVE;
+	return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
+ * @ipr_cmd:	ipr command struct
+ *
+ * Description: This asserts PCI reset to the adapter.
+ *
+ * Return value:
+ *	IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	struct pci_dev *pdev = ioa_cfg->pdev;
+
+	ENTER;
+	pci_block_user_cfg_access(pdev);
+	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
+	ipr_cmd->job_step = ipr_reset_slot_reset_done;
+	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
+	LEAVE;
+	return IPR_RC_JOB_RETURN;
+}
+
+/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
@@ -6463,7 +6721,7 @@ static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
 	} else {
-		ipr_cmd->job_step = ipr_reset_start_bist;
+		ipr_cmd->job_step = ioa_cfg->reset;
 		rc = IPR_RC_JOB_CONTINUE;
 	}
 
@@ -6496,7 +6754,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
 	} else {
-		ipr_cmd->job_step = ipr_reset_start_bist;
+		ipr_cmd->job_step = ioa_cfg->reset;
 	}
 
 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
@@ -6591,12 +6849,14 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
 
-		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
-			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
+		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
+			timeout = IPR_SHUTDOWN_TIMEOUT;
 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
 			timeout = IPR_INTERNAL_TIMEOUT;
+		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
+			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
 		else
-			timeout = IPR_SHUTDOWN_TIMEOUT;
+			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
 
 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
 
@@ -6776,8 +7036,11 @@ static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
-				IPR_SHUTDOWN_NONE);
+	if (ioa_cfg->needs_warm_reset)
+		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+	else
+		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
+					IPR_SHUTDOWN_NONE);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 	return PCI_ERS_RESULT_RECOVERED;
 }
@@ -7226,7 +7489,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	unsigned long ipr_regs_pci;
 	void __iomem *ipr_regs;
 	int rc = PCIBIOS_SUCCESSFUL;
-	volatile u32 mask, uproc;
+	volatile u32 mask, uproc, interrupts;
 
 	ENTER;
 
@@ -7265,6 +7528,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	else
 		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
 
+	rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &ioa_cfg->revid);
+
+	if (rc != PCIBIOS_SUCCESSFUL) {
+		dev_err(&pdev->dev, "Failed to read PCI revision ID\n");
+		rc = -EIO;
+		goto out_scsi_host_put;
+	}
+
 	ipr_regs_pci = pci_resource_start(pdev, 0);
 
 	rc = pci_request_regions(pdev, IPR_NAME);
@@ -7333,9 +7604,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	 * the card is in an unknown state and needs a hard reset
 	 */
 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
 		ioa_cfg->needs_hard_reset = 1;
+	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
+		ioa_cfg->needs_hard_reset = 1;
+	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
+		ioa_cfg->ioa_unit_checked = 1;
 
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
 	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
@@ -7346,6 +7622,13 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 		goto cleanup_nolog;
 	}
 
+	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
+	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
+		ioa_cfg->needs_warm_reset = 1;
+		ioa_cfg->reset = ipr_reset_slot_reset;
+	} else
+		ioa_cfg->reset = ipr_reset_start_bist;
+
 	spin_lock(&ipr_driver_lock);
 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
 	spin_unlock(&ipr_driver_lock);
@@ -7428,6 +7711,12 @@ static void __ipr_remove(struct pci_dev *pdev)
 	ENTER;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+	while(ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+	}
+
 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
@@ -7551,6 +7840,12 @@ static void ipr_shutdown(struct pci_dev *pdev)
 	unsigned long lock_flags = 0;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	while(ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	}
+
 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@@ -7577,19 +7872,22 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
 		IPR_USE_LONG_TRANSOP_TIMEOUT },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT},
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
 		IPR_USE_LONG_TRANSOP_TIMEOUT },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 0 },
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
+		IPR_USE_LONG_TRANSOP_TIMEOUT },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
 		IPR_USE_LONG_TRANSOP_TIMEOUT },
@@ -7597,7 +7895,7 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
-		IPR_USE_LONG_TRANSOP_TIMEOUT },
+		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
@@ -7627,6 +7925,7 @@ static struct pci_driver ipr_driver = {
 	.remove = ipr_remove,
 	.shutdown = ipr_shutdown,
 	.err_handler = &ipr_err_handler,
+	.dynids.use_driver_data = 1
 };
 
 /**