Diffstat (limited to 'drivers/scsi/ipr.c')
 -rw-r--r--  drivers/scsi/ipr.c  904
 1 file changed, 732 insertions(+), 172 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e0039dfae8e5..fa2cb3582cfa 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -91,11 +91,14 @@ static unsigned int ipr_max_speed = 1;
 static int ipr_testmode = 0;
 static unsigned int ipr_fastfail = 0;
 static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
+static unsigned int ipr_enable_cache = 1;
+static unsigned int ipr_debug = 0;
+static int ipr_auto_create = 1;
 static DEFINE_SPINLOCK(ipr_driver_lock);
 
 /* This table describes the differences between DMA controller chips */
 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
-	{ /* Gemstone and Citrine */
+	{ /* Gemstone, Citrine, and Obsidian */
 		.mailbox = 0x0042C,
 		.cache_line_size = 0x20,
 		{
@@ -130,6 +133,8 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 static const struct ipr_chip_t ipr_chip[] = {
 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
 };
@@ -150,6 +155,12 @@ module_param_named(fastfail, ipr_fastfail, int, 0);
 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
+module_param_named(enable_cache, ipr_enable_cache, int, 0);
+MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
+module_param_named(debug, ipr_debug, int, 0);
+MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
+module_param_named(auto_create, ipr_auto_create, int, 0);
+MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IPR_DRIVER_VERSION);
 
@@ -285,12 +296,18 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"3110: Device bus error, message or command phase"},
 	{0x04670400, 0, 1,
 	"9091: Incorrect hardware configuration change has been detected"},
+	{0x04678000, 0, 1,
+	"9073: Invalid multi-adapter configuration"},
 	{0x046E0000, 0, 1,
 	"FFF4: Command to logical unit failed"},
 	{0x05240000, 1, 0,
 	"Illegal request, invalid request type or request packet"},
 	{0x05250000, 0, 0,
 	"Illegal request, invalid resource handle"},
+	{0x05258000, 0, 0,
+	"Illegal request, commands not allowed to this device"},
+	{0x05258100, 0, 0,
+	"Illegal request, command not allowed to a secondary adapter"},
 	{0x05260000, 0, 0,
 	"Illegal request, invalid field in parameter list"},
 	{0x05260100, 0, 0,
@@ -299,6 +316,8 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"Illegal request, parameter value invalid"},
 	{0x052C0000, 0, 0,
 	"Illegal request, command sequence error"},
+	{0x052C8000, 1, 0,
+	"Illegal request, dual adapter support not enabled"},
 	{0x06040500, 0, 1,
 	"9031: Array protection temporarily suspended, protection resuming"},
 	{0x06040600, 0, 1,
@@ -315,18 +334,26 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"3029: A device replacement has occurred"},
 	{0x064C8000, 0, 1,
 	"9051: IOA cache data exists for a missing or failed device"},
+	{0x064C8100, 0, 1,
+	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
 	{0x06670100, 0, 1,
 	"9025: Disk unit is not supported at its physical location"},
 	{0x06670600, 0, 1,
 	"3020: IOA detected a SCSI bus configuration error"},
 	{0x06678000, 0, 1,
 	"3150: SCSI bus configuration error"},
+	{0x06678100, 0, 1,
+	"9074: Asymmetric advanced function disk configuration"},
 	{0x06690200, 0, 1,
 	"9041: Array protection temporarily suspended"},
 	{0x06698200, 0, 1,
 	"9042: Corrupt array parity detected on specified device"},
 	{0x066B0200, 0, 1,
 	"9030: Array no longer protected due to missing or failed disk unit"},
+	{0x066B8000, 0, 1,
+	"9071: Link operational transition"},
+	{0x066B8100, 0, 1,
+	"9072: Link not operational transition"},
 	{0x066B8200, 0, 1,
 	"9032: Array exposed but still protected"},
 	{0x07270000, 0, 0,
@@ -789,7 +816,7 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
  **/
 static void ipr_init_res_entry(struct ipr_resource_entry *res)
 {
-	res->needs_sync_complete = 1;
+	res->needs_sync_complete = 0;
 	res->in_erp = 0;
 	res->add_to_ml = 0;
 	res->del_from_ml = 0;
@@ -889,29 +916,74 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
 
 /**
  * ipr_log_vpd - Log the passed VPD to the error log.
- * @vpids: vendor/product id struct
- * @serial_num: serial number string
+ * @vpd: vendor/product id/sn struct
  *
  * Return value:
  * 	none
  **/
-static void ipr_log_vpd(struct ipr_std_inq_vpids *vpids, u8 *serial_num)
+static void ipr_log_vpd(struct ipr_vpd *vpd)
 {
 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
 		    + IPR_SERIAL_NUM_LEN];
 
-	memcpy(buffer, vpids->vendor_id, IPR_VENDOR_ID_LEN);
-	memcpy(buffer + IPR_VENDOR_ID_LEN, vpids->product_id,
+	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
+	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
 	       IPR_PROD_ID_LEN);
 	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
 	ipr_err("Vendor/Product ID: %s\n", buffer);
 
-	memcpy(buffer, serial_num, IPR_SERIAL_NUM_LEN);
+	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
 	buffer[IPR_SERIAL_NUM_LEN] = '\0';
 	ipr_err(" Serial Number: %s\n", buffer);
 }
 
 /**
+ * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
+ * @vpd: vendor/product id/sn/wwn struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
+{
+	ipr_log_vpd(&vpd->vpd);
+	ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
+		be32_to_cpu(vpd->wwid[1]));
+}
+
+/**
+ * ipr_log_enhanced_cache_error - Log a cache error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
+					 struct ipr_hostrcb *hostrcb)
+{
+	struct ipr_hostrcb_type_12_error *error =
+		&hostrcb->hcam.u.error.u.type_12_error;
+
+	ipr_err("-----Current Configuration-----\n");
+	ipr_err("Cache Directory Card Information:\n");
+	ipr_log_ext_vpd(&error->ioa_vpd);
+	ipr_err("Adapter Card Information:\n");
+	ipr_log_ext_vpd(&error->cfc_vpd);
+
+	ipr_err("-----Expected Configuration-----\n");
+	ipr_err("Cache Directory Card Information:\n");
+	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
+	ipr_err("Adapter Card Information:\n");
+	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
+
+	ipr_err("Additional IOA Data: %08X %08X %08X\n",
+		be32_to_cpu(error->ioa_data[0]),
+		be32_to_cpu(error->ioa_data[1]),
+		be32_to_cpu(error->ioa_data[2]));
+}
+
+/**
  * ipr_log_cache_error - Log a cache error.
  * @ioa_cfg: ioa config struct
  * @hostrcb: hostrcb struct
@@ -927,17 +999,15 @@ static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
 
 	ipr_err("-----Current Configuration-----\n");
 	ipr_err("Cache Directory Card Information:\n");
-	ipr_log_vpd(&error->ioa_vpids, error->ioa_sn);
+	ipr_log_vpd(&error->ioa_vpd);
 	ipr_err("Adapter Card Information:\n");
-	ipr_log_vpd(&error->cfc_vpids, error->cfc_sn);
+	ipr_log_vpd(&error->cfc_vpd);
 
 	ipr_err("-----Expected Configuration-----\n");
 	ipr_err("Cache Directory Card Information:\n");
-	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpids,
-		    error->ioa_last_attached_to_cfc_sn);
+	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
 	ipr_err("Adapter Card Information:\n");
-	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpids,
-		    error->cfc_last_attached_to_ioa_sn);
+	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
 
 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
 		be32_to_cpu(error->ioa_data[0]),
@@ -946,6 +1016,46 @@ static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
 }
 
 /**
+ * ipr_log_enhanced_config_error - Log a configuration error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
+					  struct ipr_hostrcb *hostrcb)
+{
+	int errors_logged, i;
+	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
+	struct ipr_hostrcb_type_13_error *error;
+
+	error = &hostrcb->hcam.u.error.u.type_13_error;
+	errors_logged = be32_to_cpu(error->errors_logged);
+
+	ipr_err("Device Errors Detected/Logged: %d/%d\n",
+		be32_to_cpu(error->errors_detected), errors_logged);
+
+	dev_entry = error->dev;
+
+	for (i = 0; i < errors_logged; i++, dev_entry++) {
+		ipr_err_separator;
+
+		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
+		ipr_log_ext_vpd(&dev_entry->vpd);
+
+		ipr_err("-----New Device Information-----\n");
+		ipr_log_ext_vpd(&dev_entry->new_vpd);
+
+		ipr_err("Cache Directory Card Information:\n");
+		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
+
+		ipr_err("Adapter Card Information:\n");
+		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
+	}
+}
+
+/**
  * ipr_log_config_error - Log a configuration error.
  * @ioa_cfg: ioa config struct
  * @hostrcb: hostrcb struct
@@ -966,30 +1076,22 @@ static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
 		be32_to_cpu(error->errors_detected), errors_logged);
 
-	dev_entry = error->dev_entry;
+	dev_entry = error->dev;
 
 	for (i = 0; i < errors_logged; i++, dev_entry++) {
 		ipr_err_separator;
 
-		if (dev_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
-			ipr_err("Device %d: missing\n", i + 1);
-		} else {
-			ipr_err("Device %d: %d:%d:%d:%d\n", i + 1,
-				ioa_cfg->host->host_no, dev_entry->dev_res_addr.bus,
-				dev_entry->dev_res_addr.target, dev_entry->dev_res_addr.lun);
-		}
-		ipr_log_vpd(&dev_entry->dev_vpids, dev_entry->dev_sn);
+		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
+		ipr_log_vpd(&dev_entry->vpd);
 
 		ipr_err("-----New Device Information-----\n");
-		ipr_log_vpd(&dev_entry->new_dev_vpids, dev_entry->new_dev_sn);
+		ipr_log_vpd(&dev_entry->new_vpd);
 
 		ipr_err("Cache Directory Card Information:\n");
-		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpids,
-			    dev_entry->ioa_last_with_dev_sn);
+		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
 
 		ipr_err("Adapter Card Information:\n");
-		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpids,
-			    dev_entry->cfc_last_with_dev_sn);
+		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
 
 		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
 			be32_to_cpu(dev_entry->ioa_data[0]),
@@ -1001,6 +1103,57 @@ static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
 }
 
 /**
+ * ipr_log_enhanced_array_error - Log an array configuration error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
+					 struct ipr_hostrcb *hostrcb)
+{
+	int i, num_entries;
+	struct ipr_hostrcb_type_14_error *error;
+	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
+	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
+
+	error = &hostrcb->hcam.u.error.u.type_14_error;
+
+	ipr_err_separator;
+
+	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
+		error->protection_level,
+		ioa_cfg->host->host_no,
+		error->last_func_vset_res_addr.bus,
+		error->last_func_vset_res_addr.target,
+		error->last_func_vset_res_addr.lun);
+
+	ipr_err_separator;
+
+	array_entry = error->array_member;
+	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
+			    sizeof(error->array_member));
+
+	for (i = 0; i < num_entries; i++, array_entry++) {
+		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
+			continue;
+
+		if (be32_to_cpu(error->exposed_mode_adn) == i)
+			ipr_err("Exposed Array Member %d:\n", i);
+		else
+			ipr_err("Array Member %d:\n", i);
+
+		ipr_log_ext_vpd(&array_entry->vpd);
+		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
+		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
+				 "Expected Location");
+
+		ipr_err_separator;
+	}
+}
+
+/**
  * ipr_log_array_error - Log an array configuration error.
  * @ioa_cfg: ioa config struct
  * @hostrcb: hostrcb struct
@@ -1032,36 +1185,19 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
 	array_entry = error->array_member;
 
 	for (i = 0; i < 18; i++) {
-		if (!memcmp(array_entry->serial_num, zero_sn, IPR_SERIAL_NUM_LEN))
+		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
 			continue;
 
-		if (be32_to_cpu(error->exposed_mode_adn) == i) {
+		if (be32_to_cpu(error->exposed_mode_adn) == i)
 			ipr_err("Exposed Array Member %d:\n", i);
-		} else {
+		else
 			ipr_err("Array Member %d:\n", i);
-		}
 
-		ipr_log_vpd(&array_entry->vpids, array_entry->serial_num);
-
-		if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
-			ipr_err("Current Location: unknown\n");
-		} else {
-			ipr_err("Current Location: %d:%d:%d:%d\n",
-				ioa_cfg->host->host_no,
-				array_entry->dev_res_addr.bus,
-				array_entry->dev_res_addr.target,
-				array_entry->dev_res_addr.lun);
-		}
+		ipr_log_vpd(&array_entry->vpd);
 
-		if (array_entry->expected_dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
-			ipr_err("Expected Location: unknown\n");
-		} else {
-			ipr_err("Expected Location: %d:%d:%d:%d\n",
-				ioa_cfg->host->host_no,
-				array_entry->expected_dev_res_addr.bus,
-				array_entry->expected_dev_res_addr.target,
-				array_entry->expected_dev_res_addr.lun);
-		}
+		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
+		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
+				 "Expected Location");
 
 		ipr_err_separator;
 
@@ -1073,35 +1209,95 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
 }
 
 /**
- * ipr_log_generic_error - Log an adapter error.
- * @ioa_cfg: ioa config struct
- * @hostrcb: hostrcb struct
+ * ipr_log_hex_data - Log additional hex IOA error data.
+ * @data: IOA error data
+ * @len: data length
  *
  * Return value:
  * 	none
  **/
-static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
-				  struct ipr_hostrcb *hostrcb)
+static void ipr_log_hex_data(u32 *data, int len)
 {
 	int i;
-	int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);
 
-	if (ioa_data_len == 0)
+	if (len == 0)
 		return;
 
-	ipr_err("IOA Error Data:\n");
-	ipr_err("Offset 0 1 2 3 4 5 6 7 8 9 A B C D E F\n");
-
-	for (i = 0; i < ioa_data_len / 4; i += 4) {
+	for (i = 0; i < len / 4; i += 4) {
 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
-			be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
-			be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
-			be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
-			be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
+			be32_to_cpu(data[i]),
+			be32_to_cpu(data[i+1]),
+			be32_to_cpu(data[i+2]),
+			be32_to_cpu(data[i+3]));
 	}
 }
 
 /**
+ * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
+					    struct ipr_hostrcb *hostrcb)
+{
+	struct ipr_hostrcb_type_17_error *error;
+
+	error = &hostrcb->hcam.u.error.u.type_17_error;
+	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+
+	ipr_err("%s\n", error->failure_reason);
+	ipr_err("Remote Adapter VPD:\n");
+	ipr_log_ext_vpd(&error->vpd);
+	ipr_log_hex_data(error->data,
+			 be32_to_cpu(hostrcb->hcam.length) -
+			 (offsetof(struct ipr_hostrcb_error, u) +
+			  offsetof(struct ipr_hostrcb_type_17_error, data)));
+}
+
+/**
+ * ipr_log_dual_ioa_error - Log a dual adapter error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
+				   struct ipr_hostrcb *hostrcb)
+{
+	struct ipr_hostrcb_type_07_error *error;
+
+	error = &hostrcb->hcam.u.error.u.type_07_error;
+	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+
+	ipr_err("%s\n", error->failure_reason);
+	ipr_err("Remote Adapter VPD:\n");
+	ipr_log_vpd(&error->vpd);
+	ipr_log_hex_data(error->data,
+			 be32_to_cpu(hostrcb->hcam.length) -
+			 (offsetof(struct ipr_hostrcb_error, u) +
+			  offsetof(struct ipr_hostrcb_type_07_error, data)));
+}
+
+/**
+ * ipr_log_generic_error - Log an adapter error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
+				  struct ipr_hostrcb *hostrcb)
+{
+	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
+			 be32_to_cpu(hostrcb->hcam.length));
+}
+
+/**
  * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
  * @ioasc: IOASC
  *
@@ -1172,11 +1368,10 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
 
 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
 		return;
+	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
+		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
 
 	switch (hostrcb->hcam.overlay_id) {
-	case IPR_HOST_RCB_OVERLAY_ID_1:
-		ipr_log_generic_error(ioa_cfg, hostrcb);
-		break;
 	case IPR_HOST_RCB_OVERLAY_ID_2:
 		ipr_log_cache_error(ioa_cfg, hostrcb);
 		break;
@@ -1187,13 +1382,26 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
 	case IPR_HOST_RCB_OVERLAY_ID_6:
 		ipr_log_array_error(ioa_cfg, hostrcb);
 		break;
-	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
-		ipr_log_generic_error(ioa_cfg, hostrcb);
+	case IPR_HOST_RCB_OVERLAY_ID_7:
+		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
+		break;
+	case IPR_HOST_RCB_OVERLAY_ID_12:
+		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
+		break;
+	case IPR_HOST_RCB_OVERLAY_ID_13:
+		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
+		break;
+	case IPR_HOST_RCB_OVERLAY_ID_14:
+	case IPR_HOST_RCB_OVERLAY_ID_16:
+		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
 		break;
+	case IPR_HOST_RCB_OVERLAY_ID_17:
+		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
+		break;
+	case IPR_HOST_RCB_OVERLAY_ID_1:
+	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
 	default:
-		dev_err(&ioa_cfg->pdev->dev,
-			"Unknown error received. Overlay ID: %d\n",
-			hostrcb->hcam.overlay_id);
+		ipr_log_generic_error(ioa_cfg, hostrcb);
 		break;
 	}
 }
@@ -1972,6 +2180,103 @@ static struct bin_attribute ipr_trace_attr = {
 };
 #endif
 
+static const struct {
+	enum ipr_cache_state state;
+	char *name;
+} cache_state [] = {
+	{ CACHE_NONE, "none" },
+	{ CACHE_DISABLED, "disabled" },
+	{ CACHE_ENABLED, "enabled" }
+};
+
+/**
+ * ipr_show_write_caching - Show the write caching attribute
+ * @class_dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * 	number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(class_dev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+	unsigned long lock_flags = 0;
+	int i, len = 0;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
+		if (cache_state[i].state == ioa_cfg->cache_state) {
+			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	return len;
+}
+
+
+/**
+ * ipr_store_write_caching - Enable/disable adapter write cache
+ * @class_dev: class_device struct
+ * @buf: buffer
+ * @count: buffer size
+ *
+ * This function will enable/disable adapter write cache.
+ *
+ * Return value:
+ * 	count on success / other on failure
+ **/
+static ssize_t ipr_store_write_caching(struct class_device *class_dev,
+				       const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(class_dev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+	unsigned long lock_flags = 0;
+	enum ipr_cache_state new_state = CACHE_INVALID;
+	int i;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (ioa_cfg->cache_state == CACHE_NONE)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
+		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
+			new_state = cache_state[i].state;
+			break;
+		}
+	}
+
+	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
+		return -EINVAL;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	if (ioa_cfg->cache_state == new_state) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		return count;
+	}
+
+	ioa_cfg->cache_state = new_state;
+	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
+		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
+	if (!ioa_cfg->in_reset_reload)
+		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+
+	return count;
+}
+
+static struct class_device_attribute ipr_ioa_cache_attr = {
+	.attr = {
+		.name = "write_cache",
+		.mode = S_IRUGO | S_IWUSR,
+	},
+	.show = ipr_show_write_caching,
+	.store = ipr_store_write_caching
+};
+
 /**
  * ipr_show_fw_version - Show the firmware version
  * @class_dev: class device struct
@@ -2112,6 +2417,74 @@ static struct class_device_attribute ipr_diagnostics_attr = {
 };
 
 /**
+ * ipr_show_adapter_state - Show the adapter's state
+ * @class_dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * 	number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(class_dev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+	unsigned long lock_flags = 0;
+	int len;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	if (ioa_cfg->ioa_is_dead)
+		len = snprintf(buf, PAGE_SIZE, "offline\n");
+	else
+		len = snprintf(buf, PAGE_SIZE, "online\n");
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	return len;
+}
+
+/**
+ * ipr_store_adapter_state - Change adapter state
+ * @class_dev: class_device struct
+ * @buf: buffer
+ * @count: buffer size
+ *
+ * This function will change the adapter's state.
+ *
+ * Return value:
+ * 	count on success / other on failure
+ **/
+static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
+				       const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(class_dev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+	unsigned long lock_flags;
+	int result = count;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
+		ioa_cfg->ioa_is_dead = 0;
+		ioa_cfg->reset_retries = 0;
+		ioa_cfg->in_ioa_bringdown = 0;
+		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+	}
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+
+	return result;
+}
+
+static struct class_device_attribute ipr_ioa_state_attr = {
+	.attr = {
+		.name = "state",
+		.mode = S_IRUGO | S_IWUSR,
+	},
+	.show = ipr_show_adapter_state,
+	.store = ipr_store_adapter_state
+};
+
+/**
  * ipr_store_reset_adapter - Reset the adapter
  * @class_dev: class_device struct
  * @buf: buffer
@@ -2183,7 +2556,7 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
 	num_elem = buf_len / bsize_elem;
 
 	/* Allocate a scatter/gather list for the DMA */
-	sglist = kmalloc(sizeof(struct ipr_sglist) +
+	sglist = kzalloc(sizeof(struct ipr_sglist) +
 			 (sizeof(struct scatterlist) * (num_elem - 1)),
 			 GFP_KERNEL);
 
@@ -2192,9 +2565,6 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
 		return NULL;
 	}
 
-	memset(sglist, 0, sizeof(struct ipr_sglist) +
-	       (sizeof(struct scatterlist) * (num_elem - 1)));
-
 	scatterlist = sglist->scatterlist;
 
 	sglist->order = order;
@@ -2289,31 +2659,24 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
 }
 
 /**
- * ipr_map_ucode_buffer - Map a microcode download buffer
+ * ipr_build_ucode_ioadl - Build a microcode download IOADL
  * @ipr_cmd: ipr command struct
 * @sglist: scatter/gather list
- * @len: total length of download buffer
  *
- * Maps a microcode download scatter/gather list for DMA and
- * builds the IOADL.
+ * Builds a microcode download IOA data list (IOADL).
  *
- * Return value:
- * 	0 on success / -EIO on failure
  **/
-static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
-				struct ipr_sglist *sglist, int len)
+static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
+				  struct ipr_sglist *sglist)
 {
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 	struct scatterlist *scatterlist = sglist->scatterlist;
 	int i;
 
-	ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
-					 sglist->num_sg, DMA_TO_DEVICE);
-
+	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-	ioarcb->write_data_transfer_length = cpu_to_be32(len);
+	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
 	ioarcb->write_ioadl_len =
 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
 
@@ -2324,15 +2687,52 @@ static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
 			cpu_to_be32(sg_dma_address(&scatterlist[i]));
 	}
 
-	if (likely(ipr_cmd->dma_use_sg)) {
-		ioadl[i-1].flags_and_data_len |=
-			cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+	ioadl[i-1].flags_and_data_len |=
+		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+}
+
+/**
+ * ipr_update_ioa_ucode - Update IOA's microcode
+ * @ioa_cfg: ioa config struct
+ * @sglist: scatter/gather list
+ *
+ * Initiate an adapter reset to update the IOA's microcode
+ *
+ * Return value:
+ * 	0 on success / -EIO on failure
+ **/
+static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
+				struct ipr_sglist *sglist)
+{
+	unsigned long lock_flags;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+	if (ioa_cfg->ucode_sglist) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		dev_err(&ioa_cfg->pdev->dev,
+			"Microcode download already in progress\n");
+		return -EIO;
 	}
-	else {
-		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+
+	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
+					sglist->num_sg, DMA_TO_DEVICE);
+
+	if (!sglist->num_dma_sg) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		dev_err(&ioa_cfg->pdev->dev,
+			"Failed to map microcode download buffer!\n");
 		return -EIO;
 	}
 
+	ioa_cfg->ucode_sglist = sglist;
+	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	ioa_cfg->ucode_sglist = NULL;
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 	return 0;
 }
 
@@ -2355,7 +2755,6 @@ static ssize_t ipr_store_update_fw(struct class_device *class_dev,
 	struct ipr_ucode_image_header *image_hdr;
 	const struct firmware *fw_entry;
 	struct ipr_sglist *sglist;
-	unsigned long lock_flags;
 	char fname[100];
 	char *src;
 	int len, result, dnld_size;
@@ -2396,35 +2795,17 @@ static ssize_t ipr_store_update_fw(struct class_device *class_dev,
 	if (result) {
 		dev_err(&ioa_cfg->pdev->dev,
 			"Microcode buffer copy to DMA buffer failed\n");
-		ipr_free_ucode_buffer(sglist);
-		release_firmware(fw_entry);
-		return result;
-	}
-
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-
-	if (ioa_cfg->ucode_sglist) {
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-		dev_err(&ioa_cfg->pdev->dev,
-			"Microcode download already in progress\n");
-		ipr_free_ucode_buffer(sglist);
-		release_firmware(fw_entry);
-		return -EIO;
+		goto out;
 	}
 
-	ioa_cfg->ucode_sglist = sglist;
-	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	ioa_cfg->ucode_sglist = NULL;
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
 
+	if (!result)
+		result = count;
+out:
 	ipr_free_ucode_buffer(sglist);
 	release_firmware(fw_entry);
-
-	return count;
+	return result;
 }
 
 static struct class_device_attribute ipr_update_fw_attr = {
@@ -2439,8 +2820,10 @@ static struct class_device_attribute *ipr_ioa_attrs[] = {
 	&ipr_fw_version_attr,
 	&ipr_log_level_attr,
 	&ipr_diagnostics_attr,
+	&ipr_ioa_state_attr,
 	&ipr_ioa_reset_attr,
 	&ipr_update_fw_attr,
+	&ipr_ioa_cache_attr,
 	NULL,
 };
 
@@ -2548,14 +2931,13 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 	unsigned long lock_flags = 0;
 
 	ENTER;
-	dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL);
+	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
 
 	if (!dump) {
 		ipr_err("Dump memory allocation failed\n");
 		return -ENOMEM;
 	}
 
-	memset(dump, 0, sizeof(struct ipr_dump));
 	kref_init(&dump->kref);
 	dump->ioa_cfg = ioa_cfg;
 
@@ -2824,8 +3206,10 @@ static int ipr_slave_configure(struct scsi_device *sdev)
 	if (res) {
 		if (ipr_is_af_dasd_device(res))
 			sdev->type = TYPE_RAID;
-		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res))
+		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
 			sdev->scsi_level = 4;
+			sdev->no_uld_attach = 1;
+		}
 		if (ipr_is_vset_device(res)) {
 			sdev->timeout = IPR_VSET_RW_TIMEOUT;
 			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
@@ -2848,13 +3232,14 @@ static int ipr_slave_configure(struct scsi_device *sdev)
  * handling new commands.
  *
  * Return value:
- * 	0 on success
+ * 	0 on success / -ENXIO if device does not exist
  **/
 static int ipr_slave_alloc(struct scsi_device *sdev)
 {
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
 	struct ipr_resource_entry *res;
 	unsigned long lock_flags;
+	int rc = -ENXIO;
 
 	sdev->hostdata = NULL;
 
@@ -2868,14 +3253,16 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
 			res->add_to_ml = 0;
 			res->in_erp = 0;
 			sdev->hostdata = res;
-			res->needs_sync_complete = 1;
+			if (!ipr_is_naca_model(res))
+				res->needs_sync_complete = 1;
+			rc = 0;
 			break;
 		}
 	}
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
-	return 0;
+	return rc;
 }
 
 /**
@@ -2939,7 +3326,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
 	res = scsi_cmd->device->hostdata;
 
-	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
+	if (!res)
 		return FAILED;
 
 	/*
@@ -3131,7 +3518,8 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
 	}
 
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
-	res->needs_sync_complete = 1;
+	if (!ipr_is_naca_model(res))
+		res->needs_sync_complete = 1;
 
 	LEAVE;
 	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
@@ -3435,7 +3823,8 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
 	}
 
 	if (res) {
-		res->needs_sync_complete = 1;
+		if (!ipr_is_naca_model(res))
+			res->needs_sync_complete = 1;
 		res->in_erp = 0;
 	}
 	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
@@ -3705,6 +4094,30 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
 }
 
 /**
+ * ipr_get_autosense - Copy autosense data to sense buffer
+ * @ipr_cmd: ipr command struct
+ *
+ * This function copies the autosense buffer to the buffer
+ * in the scsi_cmd, if there is autosense available.
+ *
+ * Return value:
+ * 	1 if autosense was available / 0 if not
+ **/
+static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+
+	if ((be32_to_cpu(ioasa->ioasc_specific) &
+	     (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
+		return 0;
+
+	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
+	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
+		     SCSI_SENSE_BUFFERSIZE));
+	return 1;
+}
+
+/**
  * ipr_erp_start - Process an error response for a SCSI op
  * @ioa_cfg: ioa config struct
  * @ipr_cmd: ipr command struct
@@ -3734,14 +4147,19 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 
 	switch (ioasc & IPR_IOASC_IOASC_MASK) {
 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
-		scsi_cmd->result |= (DID_IMM_RETRY << 16);
+		if (ipr_is_naca_model(res))
+			scsi_cmd->result |= (DID_ABORT << 16);
+		else
+			scsi_cmd->result |= (DID_IMM_RETRY << 16);
 		break;
 	case IPR_IOASC_IR_RESOURCE_HANDLE:
+	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
 		break;
 	case IPR_IOASC_HW_SEL_TIMEOUT:
 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
-		res->needs_sync_complete = 1;
+		if (!ipr_is_naca_model(res))
+			res->needs_sync_complete = 1;
 		break;
 	case IPR_IOASC_SYNC_REQUIRED:
 		if (!res->in_erp)
@@ -3749,6 +4167,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
 		break;
 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
+	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
 		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
 		break;
 	case IPR_IOASC_BUS_WAS_RESET:
@@ -3760,21 +4179,27 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 		if (!res->resetting_device)
 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
 		scsi_cmd->result |= (DID_ERROR << 16);
-		res->needs_sync_complete = 1;
+		if (!ipr_is_naca_model(res))
+			res->needs_sync_complete = 1;
 		break;
 	case IPR_IOASC_HW_DEV_BUS_STATUS:
 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
-			ipr_erp_cancel_all(ipr_cmd);
-			return;
+			if (!ipr_get_autosense(ipr_cmd)) {
+				if (!ipr_is_naca_model(res)) {
+					ipr_erp_cancel_all(ipr_cmd);
+					return;
+				}
+			}
 		}
-		res->needs_sync_complete = 1;
+		if (!ipr_is_naca_model(res))
+			res->needs_sync_complete = 1;
 		break;
 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
 		break;
 	default:
 		scsi_cmd->result |= (DID_ERROR << 16);
-		if (!ipr_is_vset_device(res))
+		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
 			res->needs_sync_complete = 1;
 		break;
 	}
@@ -4073,6 +4498,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
 	ioa_cfg->in_reset_reload = 0;
 	ioa_cfg->allow_cmds = 1;
 	ioa_cfg->reset_cmd = NULL;
+	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
 
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
@@ -4146,7 +4572,7 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
 	ipr_cmd->job_step = ipr_ioa_reset_done;
 
 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
-		if (!ipr_is_af_dasd_device(res))
+		if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
 			continue;
 
 		ipr_cmd->u.res = res;
| @@ -4179,6 +4605,36 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) | |||
| 4179 | } | 4605 | } |
| 4180 | 4606 | ||
| 4181 | /** | 4607 | /** |
| 4608 | * ipr_setup_write_cache - Disable write cache if needed | ||
| 4609 | * @ipr_cmd: ipr command struct | ||
| 4610 | * | ||
| 4611 | * This function sets up the adapter's write cache to the desired setting | ||
| 4612 | * | ||
| 4613 | * Return value: | ||
| 4614 | * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN | ||
| 4615 | **/ | ||
| 4616 | static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd) | ||
| 4617 | { | ||
| 4618 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | ||
| 4619 | |||
| 4620 | ipr_cmd->job_step = ipr_set_supported_devs; | ||
| 4621 | ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, | ||
| 4622 | struct ipr_resource_entry, queue); | ||
| 4623 | |||
| 4624 | if (ioa_cfg->cache_state != CACHE_DISABLED) | ||
| 4625 | return IPR_RC_JOB_CONTINUE; | ||
| 4626 | |||
| 4627 | ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); | ||
| 4628 | ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; | ||
| 4629 | ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; | ||
| 4630 | ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; | ||
| 4631 | |||
| 4632 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); | ||
| 4633 | |||
| 4634 | return IPR_RC_JOB_RETURN; | ||
| 4635 | } | ||
| 4636 | |||
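The new ipr_setup_write_cache step only does work when the module parameter left the cache disabled: in that case it issues a "prepare for normal" shutdown and defers, otherwise it falls straight through to the supported-devices step. A minimal sketch of that continue-or-return convention, with local stand-in names and return codes:

/* Toy model of the write-cache job step. */
#include <stdio.h>

enum { JOB_CONTINUE, JOB_RETURN };
enum cache_state { CACHE_NONE, CACHE_DISABLED, CACHE_ENABLED };

static int setup_write_cache(enum cache_state state)
{
	if (state != CACHE_DISABLED)
		return JOB_CONTINUE;	/* nothing to do, run the next step now */

	printf("issuing IOA shutdown (prepare for normal) to disable the cache\n");
	return JOB_RETURN;		/* wait for the command to complete */
}

int main(void)
{
	int enable_cache = 0;		/* models loading with enable_cache=0 */
	enum cache_state state = enable_cache ? CACHE_ENABLED : CACHE_DISABLED;

	printf("%s\n", setup_write_cache(state) == JOB_CONTINUE ?
	       "continue" : "return");
	return 0;
}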
| 4637 | /** | ||
| 4182 | * ipr_get_mode_page - Locate specified mode page | 4638 | * ipr_get_mode_page - Locate specified mode page |
| 4183 | * @mode_pages: mode page buffer | 4639 | * @mode_pages: mode page buffer |
| 4184 | * @page_code: page code to find | 4640 | * @page_code: page code to find |
| @@ -4389,10 +4845,7 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) | |||
| 4389 | ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), | 4845 | ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), |
| 4390 | length); | 4846 | length); |
| 4391 | 4847 | ||
| 4392 | ipr_cmd->job_step = ipr_set_supported_devs; | 4848 | ipr_cmd->job_step = ipr_setup_write_cache; |
| 4393 | ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, | ||
| 4394 | struct ipr_resource_entry, queue); | ||
| 4395 | |||
| 4396 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); | 4849 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); |
| 4397 | 4850 | ||
| 4398 | LEAVE; | 4851 | LEAVE; |
| @@ -4431,6 +4884,51 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, | |||
| 4431 | } | 4884 | } |
| 4432 | 4885 | ||
| 4433 | /** | 4886 | /** |
| 4887 | * ipr_reset_cmd_failed - Handle failure of IOA reset command | ||
| 4888 | * @ipr_cmd: ipr command struct | ||
| 4889 | * | ||
| 4890 | * This function handles the failure of an IOA bringup command. | ||
| 4891 | * | ||
| 4892 | * Return value: | ||
| 4893 | * IPR_RC_JOB_RETURN | ||
| 4894 | **/ | ||
| 4895 | static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) | ||
| 4896 | { | ||
| 4897 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | ||
| 4898 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | ||
| 4899 | |||
| 4900 | dev_err(&ioa_cfg->pdev->dev, | ||
| 4901 | "0x%02X failed with IOASC: 0x%08X\n", | ||
| 4902 | ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); | ||
| 4903 | |||
| 4904 | ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); | ||
| 4905 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); | ||
| 4906 | return IPR_RC_JOB_RETURN; | ||
| 4907 | } | ||
| 4908 | |||
| 4909 | /** | ||
| 4910 | * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense | ||
| 4911 | * @ipr_cmd: ipr command struct | ||
| 4912 | * | ||
| 4913 | * This function handles the failure of a Mode Sense to the IOAFP. | ||
| 4914 | * Some adapters do not handle all mode pages. | ||
| 4915 | * | ||
| 4916 | * Return value: | ||
| 4917 | * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN | ||
| 4918 | **/ | ||
| 4919 | static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) | ||
| 4920 | { | ||
| 4921 | u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); | ||
| 4922 | |||
| 4923 | if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { | ||
| 4924 | ipr_cmd->job_step = ipr_setup_write_cache; | ||
| 4925 | return IPR_RC_JOB_CONTINUE; | ||
| 4926 | } | ||
| 4927 | |||
| 4928 | return ipr_reset_cmd_failed(ipr_cmd); | ||
| 4929 | } | ||
| 4930 | |||
| 4931 | /** | ||
| 4434 | * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA | 4932 | * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA |
| 4435 | * @ipr_cmd: ipr command struct | 4933 | * @ipr_cmd: ipr command struct |
| 4436 | * | 4934 | * |
| @@ -4451,6 +4949,7 @@ static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) | |||
| 4451 | sizeof(struct ipr_mode_pages)); | 4949 | sizeof(struct ipr_mode_pages)); |
| 4452 | 4950 | ||
| 4453 | ipr_cmd->job_step = ipr_ioafp_mode_select_page28; | 4951 | ipr_cmd->job_step = ipr_ioafp_mode_select_page28; |
| 4952 | ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; | ||
| 4454 | 4953 | ||
| 4455 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); | 4954 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); |
| 4456 | 4955 | ||
| @@ -4612,6 +5111,27 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, | |||
| 4612 | } | 5111 | } |
| 4613 | 5112 | ||
| 4614 | /** | 5113 | /** |
| 5114 | * ipr_inquiry_page_supported - Is the given inquiry page supported | ||
| 5115 | * @page0: inquiry page 0 buffer | ||
| 5116 | * @page: page code. | ||
| 5117 | * | ||
| 5118 | * This function determines if the specified inquiry page is supported. | ||
| 5119 | * | ||
| 5120 | * Return value: | ||
| 5121 | * 1 if page is supported / 0 if not | ||
| 5122 | **/ | ||
| 5123 | static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page) | ||
| 5124 | { | ||
| 5125 | int i; | ||
| 5126 | |||
| 5127 | for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) | ||
| 5128 | if (page0->page[i] == page) | ||
| 5129 | return 1; | ||
| 5130 | |||
| 5131 | return 0; | ||
| 5132 | } | ||
| 5133 | |||
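Inquiry page 0 carries the list of page codes a device supports, and the helper above simply scans that list for the requested code. The self-contained sketch below models the same scan with a simplified buffer layout; the struct is an illustrative stand-in, not the driver's ipr_inquiry_page0 definition.

/* Minimal model of the "is this inquiry page supported?" check. */
#include <stdio.h>
#include <stdint.h>

#define PAGE0_ENTRIES 32

struct inquiry_page0 {
	uint8_t len;                    /* number of valid entries in page[] */
	uint8_t page[PAGE0_ENTRIES];    /* supported page codes */
};

static int page_supported(const struct inquiry_page0 *p0, uint8_t page)
{
	uint8_t n = p0->len < PAGE0_ENTRIES ? p0->len : PAGE0_ENTRIES;

	for (uint8_t i = 0; i < n; i++)
		if (p0->page[i] == page)
			return 1;
	return 0;
}

int main(void)
{
	struct inquiry_page0 p0 = { .len = 3, .page = { 0x00, 0x01, 0x03 } };

	/* Page 1 is the page the driver checks before trusting cache data. */
	printf("page 1 supported: %d\n", page_supported(&p0, 1));
	return 0;
}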
| 5134 | /** | ||
| 4615 | * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. | 5135 | * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. |
| 4616 | * @ipr_cmd: ipr command struct | 5136 | * @ipr_cmd: ipr command struct |
| 4617 | * | 5137 | * |
| @@ -4624,6 +5144,36 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, | |||
| 4624 | static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) | 5144 | static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) |
| 4625 | { | 5145 | { |
| 4626 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 5146 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 5147 | struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; | ||
| 5148 | |||
| 5149 | ENTER; | ||
| 5150 | |||
| 5151 | if (!ipr_inquiry_page_supported(page0, 1)) | ||
| 5152 | ioa_cfg->cache_state = CACHE_NONE; | ||
| 5153 | |||
| 5154 | ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; | ||
| 5155 | |||
| 5156 | ipr_ioafp_inquiry(ipr_cmd, 1, 3, | ||
| 5157 | ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), | ||
| 5158 | sizeof(struct ipr_inquiry_page3)); | ||
| 5159 | |||
| 5160 | LEAVE; | ||
| 5161 | return IPR_RC_JOB_RETURN; | ||
| 5162 | } | ||
| 5163 | |||
| 5164 | /** | ||
| 5165 | * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter. | ||
| 5166 | * @ipr_cmd: ipr command struct | ||
| 5167 | * | ||
| 5168 | * This function sends a Page 0 inquiry to the adapter | ||
| 5169 | * to retrieve supported inquiry pages. | ||
| 5170 | * | ||
| 5171 | * Return value: | ||
| 5172 | * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN | ||
| 5173 | **/ | ||
| 5174 | static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd) | ||
| 5175 | { | ||
| 5176 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | ||
| 4627 | char type[5]; | 5177 | char type[5]; |
| 4628 | 5178 | ||
| 4629 | ENTER; | 5179 | ENTER; |
| @@ -4633,11 +5183,11 @@ static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) | |||
| 4633 | type[4] = '\0'; | 5183 | type[4] = '\0'; |
| 4634 | ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); | 5184 | ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); |
| 4635 | 5185 | ||
| 4636 | ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; | 5186 | ipr_cmd->job_step = ipr_ioafp_page3_inquiry; |
| 4637 | 5187 | ||
| 4638 | ipr_ioafp_inquiry(ipr_cmd, 1, 3, | 5188 | ipr_ioafp_inquiry(ipr_cmd, 1, 0, |
| 4639 | ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), | 5189 | ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), |
| 4640 | sizeof(struct ipr_inquiry_page3)); | 5190 | sizeof(struct ipr_inquiry_page0)); |
| 4641 | 5191 | ||
| 4642 | LEAVE; | 5192 | LEAVE; |
| 4643 | return IPR_RC_JOB_RETURN; | 5193 | return IPR_RC_JOB_RETURN; |
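With the new page 0 step inserted, the bring-up inquiries now run standard inquiry, then page 0, then page 3, then the IOA config query, each step recording its successor in job_step before issuing its command. The sketch below models only that chaining of steps through a function pointer; the step bodies are stubs and the synchronous loop stands in for the asynchronous command completions that drive the real sequence.

/* Sketch of the reworked bring-up chain driven by a "next step" pointer. */
#include <stdio.h>

struct job {
	int (*step)(struct job *);
};

static int query_cfg(struct job *j) { printf("query IOA config\n"); return 0; }
static int page3(struct job *j)     { printf("inquiry page 3\n"); j->step = query_cfg; return 1; }
static int page0(struct job *j)     { printf("inquiry page 0\n"); j->step = page3;     return 1; }
static int std_inq(struct job *j)   { printf("standard inquiry\n"); j->step = page0;   return 1; }

int main(void)
{
	struct job j = { .step = std_inq };

	while (j.step(&j))	/* each step queues the next until the chain ends */
		;
	return 0;
}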
| @@ -4657,7 +5207,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) | |||
| 4657 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 5207 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 4658 | 5208 | ||
| 4659 | ENTER; | 5209 | ENTER; |
| 4660 | ipr_cmd->job_step = ipr_ioafp_page3_inquiry; | 5210 | ipr_cmd->job_step = ipr_ioafp_page0_inquiry; |
| 4661 | 5211 | ||
| 4662 | ipr_ioafp_inquiry(ipr_cmd, 0, 0, | 5212 | ipr_ioafp_inquiry(ipr_cmd, 0, 0, |
| 4663 | ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), | 5213 | ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), |
| @@ -4815,7 +5365,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) | |||
| 4815 | } | 5365 | } |
| 4816 | 5366 | ||
| 4817 | /* Enable destructive diagnostics on IOA */ | 5367 | /* Enable destructive diagnostics on IOA */ |
| 4818 | writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg); | 5368 | writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg); |
| 4819 | 5369 | ||
| 4820 | writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg); | 5370 | writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg); |
| 4821 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | 5371 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); |
| @@ -5147,12 +5697,7 @@ static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) | |||
| 5147 | ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; | 5697 | ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; |
| 5148 | ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; | 5698 | ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; |
| 5149 | 5699 | ||
| 5150 | if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) { | 5700 | ipr_build_ucode_ioadl(ipr_cmd, sglist); |
| 5151 | dev_err(&ioa_cfg->pdev->dev, | ||
| 5152 | "Failed to map microcode download buffer\n"); | ||
| 5153 | return IPR_RC_JOB_CONTINUE; | ||
| 5154 | } | ||
| 5155 | |||
| 5156 | ipr_cmd->job_step = ipr_reset_ucode_download_done; | 5701 | ipr_cmd->job_step = ipr_reset_ucode_download_done; |
| 5157 | 5702 | ||
| 5158 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, | 5703 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, |
| @@ -5217,7 +5762,6 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) | |||
| 5217 | static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) | 5762 | static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) |
| 5218 | { | 5763 | { |
| 5219 | u32 rc, ioasc; | 5764 | u32 rc, ioasc; |
| 5220 | unsigned long scratch = ipr_cmd->u.scratch; | ||
| 5221 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 5765 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
| 5222 | 5766 | ||
| 5223 | do { | 5767 | do { |
| @@ -5233,17 +5777,13 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) | |||
| 5233 | } | 5777 | } |
| 5234 | 5778 | ||
| 5235 | if (IPR_IOASC_SENSE_KEY(ioasc)) { | 5779 | if (IPR_IOASC_SENSE_KEY(ioasc)) { |
| 5236 | dev_err(&ioa_cfg->pdev->dev, | 5780 | rc = ipr_cmd->job_step_failed(ipr_cmd); |
| 5237 | "0x%02X failed with IOASC: 0x%08X\n", | 5781 | if (rc == IPR_RC_JOB_RETURN) |
| 5238 | ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); | 5782 | return; |
| 5239 | |||
| 5240 | ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); | ||
| 5241 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); | ||
| 5242 | return; | ||
| 5243 | } | 5783 | } |
| 5244 | 5784 | ||
| 5245 | ipr_reinit_ipr_cmnd(ipr_cmd); | 5785 | ipr_reinit_ipr_cmnd(ipr_cmd); |
| 5246 | ipr_cmd->u.scratch = scratch; | 5786 | ipr_cmd->job_step_failed = ipr_reset_cmd_failed; |
| 5247 | rc = ipr_cmd->job_step(ipr_cmd); | 5787 | rc = ipr_cmd->job_step(ipr_cmd); |
| 5248 | } while(rc == IPR_RC_JOB_CONTINUE); | 5788 | } while(rc == IPR_RC_JOB_CONTINUE); |
| 5249 | } | 5789 | } |
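The reset job loop now routes failures through a per-step job_step_failed handler, with ipr_reset_cmd_failed restored as the default on every pass, so a step such as the page 28 mode sense can recover from an unsupported command instead of forcing a full adapter reset. The standalone model below shows that shape; all names, the error flag, and the recovery decision are local stand-ins.

/* Standalone model of the reset job loop with a per-step failure hook. */
#include <stdio.h>

enum { JOB_CONTINUE, JOB_RETURN };

struct cmd {
	int (*step)(struct cmd *);
	int (*step_failed)(struct cmd *);
	int error;			/* models a non-zero IOASC sense key */
};

static int default_failed(struct cmd *c)
{
	printf("step failed, initiating full adapter reset\n");
	return JOB_RETURN;
}

static int mode_sense_failed(struct cmd *c)
{
	printf("mode sense unsupported, skipping ahead\n");
	return JOB_CONTINUE;		/* recover by moving to the next step */
}

static int last_step(struct cmd *c)  { printf("done\n"); return JOB_RETURN; }

static int mode_sense(struct cmd *c)
{
	c->step = last_step;
	c->step_failed = mode_sense_failed;
	c->error = 1;			/* pretend the adapter rejected the command */
	return JOB_CONTINUE;
}

static void run_job(struct cmd *c)
{
	int rc;

	do {
		if (c->error && c->step_failed(c) == JOB_RETURN)
			return;
		c->error = 0;
		c->step_failed = default_failed;	/* default restored each pass */
		rc = c->step(c);
	} while (rc == JOB_CONTINUE);
}

int main(void)
{
	struct cmd c = { .step = mode_sense, .step_failed = default_failed };

	run_job(&c);
	return 0;
}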
| @@ -5517,15 +6057,12 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) | |||
| 5517 | int i, rc = -ENOMEM; | 6057 | int i, rc = -ENOMEM; |
| 5518 | 6058 | ||
| 5519 | ENTER; | 6059 | ENTER; |
| 5520 | ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) * | 6060 | ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * |
| 5521 | IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); | 6061 | IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); |
| 5522 | 6062 | ||
| 5523 | if (!ioa_cfg->res_entries) | 6063 | if (!ioa_cfg->res_entries) |
| 5524 | goto out; | 6064 | goto out; |
| 5525 | 6065 | ||
| 5526 | memset(ioa_cfg->res_entries, 0, | ||
| 5527 | sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS); | ||
| 5528 | |||
| 5529 | for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) | 6066 | for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) |
| 5530 | list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); | 6067 | list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); |
| 5531 | 6068 | ||
| @@ -5566,15 +6103,12 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) | |||
| 5566 | list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); | 6103 | list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); |
| 5567 | } | 6104 | } |
| 5568 | 6105 | ||
| 5569 | ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) * | 6106 | ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) * |
| 5570 | IPR_NUM_TRACE_ENTRIES, GFP_KERNEL); | 6107 | IPR_NUM_TRACE_ENTRIES, GFP_KERNEL); |
| 5571 | 6108 | ||
| 5572 | if (!ioa_cfg->trace) | 6109 | if (!ioa_cfg->trace) |
| 5573 | goto out_free_hostrcb_dma; | 6110 | goto out_free_hostrcb_dma; |
| 5574 | 6111 | ||
| 5575 | memset(ioa_cfg->trace, 0, | ||
| 5576 | sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES); | ||
| 5577 | |||
| 5578 | rc = 0; | 6112 | rc = 0; |
| 5579 | out: | 6113 | out: |
| 5580 | LEAVE; | 6114 | LEAVE; |
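The allocation hunks above replace kmalloc()-plus-memset() pairs with kzalloc(), which returns already-zeroed memory in a single call. The userspace analogue below shows the same simplification using malloc()+memset() versus calloc(); the struct is just an example type.

/* Userspace analogue of the kmalloc+memset -> kzalloc cleanup. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct entry { int id; char name[16]; };

int main(void)
{
	size_t n = 8;

	/* before: allocate, then clear by hand */
	struct entry *a = malloc(n * sizeof(*a));
	if (!a)
		return 1;
	memset(a, 0, n * sizeof(*a));

	/* after: one call that allocates and zeroes */
	struct entry *b = calloc(n, sizeof(*b));
	if (!b) {
		free(a);
		return 1;
	}

	printf("a[0].id=%d b[0].id=%d\n", a[0].id, b[0].id);
	free(a);
	free(b);
	return 0;
}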
| @@ -5642,6 +6176,9 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
| 5642 | ioa_cfg->host = host; | 6176 | ioa_cfg->host = host; |
| 5643 | ioa_cfg->pdev = pdev; | 6177 | ioa_cfg->pdev = pdev; |
| 5644 | ioa_cfg->log_level = ipr_log_level; | 6178 | ioa_cfg->log_level = ipr_log_level; |
| 6179 | ioa_cfg->doorbell = IPR_DOORBELL; | ||
| 6180 | if (!ipr_auto_create) | ||
| 6181 | ioa_cfg->doorbell |= IPR_RUNTIME_RESET; | ||
| 5645 | sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); | 6182 | sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); |
| 5646 | sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); | 6183 | sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); |
| 5647 | sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL); | 6184 | sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL); |
| @@ -5660,6 +6197,10 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
| 5660 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); | 6197 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); |
| 5661 | init_waitqueue_head(&ioa_cfg->reset_wait_q); | 6198 | init_waitqueue_head(&ioa_cfg->reset_wait_q); |
| 5662 | ioa_cfg->sdt_state = INACTIVE; | 6199 | ioa_cfg->sdt_state = INACTIVE; |
| 6200 | if (ipr_enable_cache) | ||
| 6201 | ioa_cfg->cache_state = CACHE_ENABLED; | ||
| 6202 | else | ||
| 6203 | ioa_cfg->cache_state = CACHE_DISABLED; | ||
| 5663 | 6204 | ||
| 5664 | ipr_initialize_bus_attr(ioa_cfg); | 6205 | ipr_initialize_bus_attr(ioa_cfg); |
| 5665 | 6206 | ||
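These init-time hunks fold the two new module parameters into per-adapter state: auto_create=0 adds the runtime-reset bit to the doorbell value written during bring-up, and enable_cache selects the initial cache_state. A small sketch of that wiring, with an illustrative flag value and field names standing in for the driver's:

/* Sketch of folding the module parameters into per-adapter state. */
#include <stdio.h>

#define RUNTIME_RESET 0x40000000u	/* illustrative doorbell flag */

enum cache_state { CACHE_NONE, CACHE_DISABLED, CACHE_ENABLED };

struct adapter {
	unsigned int doorbell;
	enum cache_state cache_state;
};

static int param_enable_cache = 1;	/* models enable_cache= */
static int param_auto_create = 1;	/* models auto_create= */

static void init_adapter(struct adapter *a, unsigned int base_doorbell)
{
	a->doorbell = base_doorbell;
	if (!param_auto_create)
		a->doorbell |= RUNTIME_RESET;	/* suppress auto-created arrays */
	a->cache_state = param_enable_cache ? CACHE_ENABLED : CACHE_DISABLED;
}

int main(void)
{
	struct adapter a;

	param_auto_create = 0;
	init_adapter(&a, 0x10000000u);
	printf("doorbell=0x%08x cache=%d\n", a.doorbell, a.cache_state);
	return 0;
}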
| @@ -6008,6 +6549,7 @@ static int __devinit ipr_probe(struct pci_dev *pdev, | |||
| 6008 | ipr_scan_vsets(ioa_cfg); | 6549 | ipr_scan_vsets(ioa_cfg); |
| 6009 | scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN); | 6550 | scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN); |
| 6010 | ioa_cfg->allow_ml_add_del = 1; | 6551 | ioa_cfg->allow_ml_add_del = 1; |
| 6552 | ioa_cfg->host->max_channel = IPR_VSET_BUS; | ||
| 6011 | schedule_work(&ioa_cfg->work_q); | 6553 | schedule_work(&ioa_cfg->work_q); |
| 6012 | return 0; | 6554 | return 0; |
| 6013 | } | 6555 | } |
| @@ -6055,12 +6597,30 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = { | |||
| 6055 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, | 6597 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, |
| 6056 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, | 6598 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, |
| 6057 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, | 6599 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, |
| 6600 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, | ||
| 6601 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, | ||
| 6602 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, | ||
| 6603 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, | ||
| 6604 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, | ||
| 6605 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, | ||
| 6606 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, | ||
| 6607 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, | ||
| 6608 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, | ||
| 6609 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, | ||
| 6610 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, | ||
| 6611 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, | ||
| 6612 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, | ||
| 6613 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, | ||
| 6614 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, | ||
| 6058 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, | 6615 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, |
| 6059 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, | 6616 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, |
| 6060 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, | 6617 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, |
| 6061 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, | 6618 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, |
| 6062 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, | 6619 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, |
| 6063 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, | 6620 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, |
| 6621 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, | ||
| 6622 | PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, | ||
| 6623 | 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, | ||
| 6064 | { } | 6624 | { } |
| 6065 | }; | 6625 | }; |
| 6066 | MODULE_DEVICE_TABLE(pci, ipr_pci_table); | 6626 | MODULE_DEVICE_TABLE(pci, ipr_pci_table); |
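The table additions above register the Obsidian boards and the extra subsystem IDs against the existing chip configs; matching is on vendor/device plus subvendor/subdevice, with the chip configuration carried as driver_data. The toy model below shows that matching pattern only; the ID values are placeholders, not the real Obsidian numbers.

/* Toy model of PCI ID-table matching with subsystem IDs and driver_data. */
#include <stdio.h>
#include <stdint.h>

struct id_entry {
	uint16_t vendor, device, subvendor, subdevice;
	uintptr_t driver_data;		/* index into a chip-config array */
};

static const struct id_entry table[] = {
	{ 0x1014, 0x0180, 0x1014, 0x02BD, 0 },	/* placeholder IDs */
	{ 0x1014, 0x0181, 0x1014, 0x02BE, 1 },
	{ 0 }					/* sentinel */
};

static const struct id_entry *match(uint16_t v, uint16_t d,
				    uint16_t sv, uint16_t sd)
{
	for (const struct id_entry *e = table; e->vendor; e++)
		if (e->vendor == v && e->device == d &&
		    e->subvendor == sv && e->subdevice == sd)
			return e;
	return NULL;
}

int main(void)
{
	const struct id_entry *e = match(0x1014, 0x0181, 0x1014, 0x02BE);

	printf("%s\n", e ? "matched" : "no match");
	return 0;
}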
