author		Don Brace <don.brace@pmcs.com>	2015-01-23 17:43:30 -0500
committer	James Bottomley <JBottomley@Parallels.com>	2015-02-02 12:57:40 -0500
commit		03383736348bb73a45f8460afca3c5f5bd1be172 (patch)
tree		bd4d9b4450027d3f9f4f259d60c000558a68d7f4
parent		080ef1cc7fdf5d0800775c8626718da807e7ba99 (diff)
hpsa: honor queue depth of physical devices
When using the ioaccel submission methods, requests destined for RAID
volumes are sometimes diverted to physical devices.  The OS has no or
limited knowledge of these physical devices, so it is up to the driver
to avoid pushing the device too hard.  It is better to honor the
physical device queue limit rather than making the device spew zillions
of TASK SET FULL responses.

This is so that hpsa-based devices support /sys/block/sdNN/device/queue_type
of simple, which lets the SCSI midlayer automatically adjust the queue_depth
based on TASK SET FULL and GOOD status.

Adjust the queue depth for a new device after it is created, based on the
maximum queue depths of the physical devices that constitute the device.
This drops the maximum queue depth from .can_queue of 1024 to something
like 174 for single-drive RAID-0, 348 for two-drive RAID-1, etc.  It also
adjusts for the ratio of data to parity drives.

Reviewed-by: Scott Teel <scott.teel@pmcs.com>
Signed-off-by: Webb Scales <webbnh@hp.com>
Signed-off-by: Don Brace <don.brace@pmcs.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
-rw-r--r--	drivers/scsi/hpsa.c	318
-rw-r--r--	drivers/scsi/hpsa.h	14
-rw-r--r--	drivers/scsi/hpsa_cmd.h	148
3 files changed, 412 insertions(+), 68 deletions(-)
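
The arithmetic in the commit message works out as follows (a minimal
userspace sketch of the sizing rule; the constant and function names here
are illustrative, not from the driver):

    #include <stdio.h>

    #define CAN_QUEUE 1024     /* controller-wide .can_queue */
    #define DRIVE_QDEPTH 174   /* example per-drive limit from BMIC data */

    /* Sum the member drives' depths, capped at the controller limit. */
    static int volume_queue_depth(int ndrives)
    {
            int q = ndrives * DRIVE_QDEPTH;
            return q < CAN_QUEUE ? q : CAN_QUEUE;
    }

    int main(void)
    {
            printf("1-drive RAID-0: %d\n", volume_queue_depth(1)); /* 174 */
            printf("2-drive RAID-1: %d\n", volume_queue_depth(2)); /* 348 */
            return 0;
    }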
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index dcacb29ff589..60f57347d53b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -247,7 +247,7 @@ static void hpsa_drain_accel_commands(struct ctlr_info *h);
 static void hpsa_flush_cache(struct ctlr_info *h);
 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
-	u8 *scsi3addr);
+	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
 static void hpsa_command_resubmit_worker(struct work_struct *work);
 
 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
@@ -965,12 +965,24 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
 	/* Raid level changed. */
 	h->dev[entry]->raid_level = new_entry->raid_level;
 
-	/* Raid offload parameters changed. */
+	/* Raid offload parameters changed.  Careful about the ordering. */
+	if (new_entry->offload_config && new_entry->offload_enabled) {
+		/*
+		 * if drive is newly offload_enabled, we want to copy the
+		 * raid map data first.  If previously offload_enabled and
+		 * offload_config were set, raid map data had better be
+		 * the same as it was before.  if raid map data is changed
+		 * then it had better be the case that
+		 * h->dev[entry]->offload_enabled is currently 0.
+		 */
+		h->dev[entry]->raid_map = new_entry->raid_map;
+		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+		wmb(); /* ensure raid map updated prior to ->offload_enabled */
+	}
 	h->dev[entry]->offload_config = new_entry->offload_config;
-	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
-	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
 	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
-	h->dev[entry]->raid_map = new_entry->raid_map;
+	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
+	h->dev[entry]->queue_depth = new_entry->queue_depth;
 
 	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
 		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
@@ -1096,6 +1108,8 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
 		return 1;
 	if (dev1->offload_enabled != dev2->offload_enabled)
 		return 1;
+	if (dev1->queue_depth != dev2->queue_depth)
+		return 1;
 	return 0;
 }
 
@@ -1241,6 +1255,85 @@ static void hpsa_show_volume_status(struct ctlr_info *h,
 	}
 }
 
+/*
+ * Figure the list of physical drive pointers for a logical drive with
+ * raid offload configured.
+ */
+static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
+				struct hpsa_scsi_dev_t *dev[], int ndevices,
+				struct hpsa_scsi_dev_t *logical_drive)
+{
+	struct raid_map_data *map = &logical_drive->raid_map;
+	struct raid_map_disk_data *dd = &map->data[0];
+	int i, j;
+	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
+				le16_to_cpu(map->metadata_disks_per_row);
+	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
+				le16_to_cpu(map->layout_map_count) *
+				total_disks_per_row;
+	int nphys_disk = le16_to_cpu(map->layout_map_count) *
+				total_disks_per_row;
+	int qdepth;
+
+	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
+		nraid_map_entries = RAID_MAP_MAX_ENTRIES;
+
+	qdepth = 0;
+	for (i = 0; i < nraid_map_entries; i++) {
+		logical_drive->phys_disk[i] = NULL;
+		if (!logical_drive->offload_config)
+			continue;
+		for (j = 0; j < ndevices; j++) {
+			if (dev[j]->devtype != TYPE_DISK)
+				continue;
+			if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
+				continue;
+			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
+				continue;
+
+			logical_drive->phys_disk[i] = dev[j];
+			if (i < nphys_disk)
+				qdepth = min(h->nr_cmds, qdepth +
+				    logical_drive->phys_disk[i]->queue_depth);
+			break;
+		}
+
+		/*
+		 * This can happen if a physical drive is removed and
+		 * the logical drive is degraded. In that case, the RAID
+		 * map data will refer to a physical disk which isn't actually
+		 * present. And in that case offload_enabled should already
+		 * be 0, but we'll turn it off here just in case
+		 */
+		if (!logical_drive->phys_disk[i]) {
+			logical_drive->offload_enabled = 0;
+			logical_drive->queue_depth = h->nr_cmds;
+		}
+	}
+	if (nraid_map_entries)
+		/*
+		 * This is correct for reads, too high for full stripe writes,
+		 * way too high for partial stripe writes
+		 */
+		logical_drive->queue_depth = qdepth;
+	else
+		logical_drive->queue_depth = h->nr_cmds;
+}
+
+static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
+				struct hpsa_scsi_dev_t *dev[], int ndevices)
+{
+	int i;
+
+	for (i = 0; i < ndevices; i++) {
+		if (dev[i]->devtype != TYPE_DISK)
+			continue;
+		if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
+			continue;
+		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
+	}
+}
+
 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 	struct hpsa_scsi_dev_t *sd[], int nsds)
 {
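
The accumulation in hpsa_figure_phys_disk_ptrs() above is a running sum
of per-disk depths clamped to the controller's command count.  In
isolation the loop behaves like this (standalone sketch, illustrative
names only):

    /* Model of the qdepth accumulation: sum the member drives' depths
     * across the first nphys_disk map entries, never exceeding the
     * controller-wide command count. */
    static int accumulate_qdepth(int nr_cmds, const int *per_disk,
                                 int nphys_disk)
    {
            int i, qdepth = 0;

            for (i = 0; i < nphys_disk; i++) {
                    qdepth += per_disk[i];
                    if (qdepth > nr_cmds)
                            qdepth = nr_cmds;   /* min(h->nr_cmds, ...) */
            }
            return qdepth;
    }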
@@ -1425,8 +1518,12 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
 	spin_lock_irqsave(&h->devlock, flags);
 	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
 		sdev_id(sdev), sdev->lun);
-	if (sd != NULL)
+	if (sd != NULL) {
 		sdev->hostdata = sd;
+		if (sd->queue_depth)
+			scsi_change_queue_depth(sdev, sd->queue_depth);
+		atomic_set(&sd->ioaccel_cmds_out, 0);
+	}
 	spin_unlock_irqrestore(&h->devlock, flags);
 	return 0;
 }
@@ -1679,6 +1776,9 @@ static void complete_scsi_command(struct CommandList *cp)
 	cmd->result = (DID_OK << 16);		/* host byte */
 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
 
+	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
+		atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
+
 	if (cp->cmd_type == CMD_IOACCEL2)
 		return process_ioaccel2_completion(h, cp, cmd, dev);
 
@@ -1686,6 +1786,8 @@ static void complete_scsi_command(struct CommandList *cp)
 
 	scsi_set_resid(cmd, ei->ResidualCnt);
 	if (ei->CommandStatus == 0) {
+		if (cp->cmd_type == CMD_IOACCEL1)
+			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
 		cmd_free(h, cp);
 		cmd->scsi_done(cmd);
 		return;
@@ -2248,6 +2350,34 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
 	return rc;
 }
 
+static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
+		unsigned char scsi3addr[], u16 bmic_device_index,
+		struct bmic_identify_physical_device *buf, size_t bufsize)
+{
+	int rc = IO_OK;
+	struct CommandList *c;
+	struct ErrorInfo *ei;
+
+	c = cmd_alloc(h);
+	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
+		0, RAID_CTLR_LUNID, TYPE_CMD);
+	if (rc)
+		goto out;
+
+	c->Request.CDB[2] = bmic_device_index & 0xff;
+	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
+
+	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+	ei = c->err_info;
+	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		hpsa_scsi_interpret_error(h, c);
+		rc = -1;
+	}
+out:
+	cmd_free(h, c);
+	return rc;
+}
+
 static int hpsa_vpd_page_supported(struct ctlr_info *h,
 	unsigned char scsi3addr[], u8 page)
 {
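
Note how the 16-bit BMIC device index lands in two non-adjacent CDB
bytes.  Isolated, the encoding used above is (sketch, hypothetical
helper name):

    #include <stdint.h>

    /* Split a 16-bit BMIC device index across CDB bytes 2 and 9,
     * mirroring hpsa_bmic_id_physical_device() above. */
    static void encode_bmic_index(uint8_t cdb[16], uint16_t index)
    {
            cdb[2] = index & 0xff;          /* low byte */
            cdb[9] = (index >> 8) & 0xff;   /* high byte */
    }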
@@ -2348,7 +2478,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
 }
 
 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
-		struct ReportLUNdata *buf, int bufsize,
+		void *buf, int bufsize,
 		int extended_response)
 {
 	int rc = IO_OK;
@@ -2377,11 +2507,13 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
 		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
 	} else {
-		if (buf->extended_response_flag != extended_response) {
+		struct ReportLUNdata *rld = buf;
+
+		if (rld->extended_response_flag != extended_response) {
 			dev_err(&h->pdev->dev,
 				"report luns requested format %u, got %u\n",
 				extended_response,
-				buf->extended_response_flag);
+				rld->extended_response_flag);
 			rc = -1;
 		}
 	}
@@ -2391,10 +2523,10 @@ out:
 }
 
 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
-		struct ReportLUNdata *buf,
-		int bufsize, int extended_response)
+		struct ReportExtendedLUNdata *buf, int bufsize)
 {
-	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
+	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
+			HPSA_REPORT_PHYS_EXTENDED);
 }
 
 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
@@ -2569,6 +2701,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 		this_device->offload_config = 0;
 		this_device->offload_enabled = 0;
 		this_device->volume_offline = 0;
+		this_device->queue_depth = h->nr_cmds;
 	}
 
 	if (is_OBDR_device) {
@@ -2711,7 +2844,6 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
 {
 	struct ReportExtendedLUNdata *physicals = NULL;
 	int responsesize = 24;	/* size of physical extended response */
-	int extended = 2;	/* flag forces reporting 'other dev info'. */
 	int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
 	u32 nphysicals = 0;	/* number of reported physical devs */
 	int found = 0;		/* found match (1) or not (0) */
@@ -2758,8 +2890,7 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
 	physicals = kzalloc(reportsize, GFP_KERNEL);
 	if (physicals == NULL)
 		return 0;
-	if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
-		reportsize, extended)) {
+	if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) {
 		dev_err(&h->pdev->dev,
 			"Can't lookup %s device handle: report physical LUNs failed.\n",
 			"HP SSD Smart Path");
@@ -2800,34 +2931,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
  * Returns 0 on success, -1 otherwise.
  */
 static int hpsa_gather_lun_info(struct ctlr_info *h,
-	int reportphyslunsize, int reportloglunsize,
-	struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
+	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
 	struct ReportLUNdata *logdev, u32 *nlogicals)
 {
-	int physical_entry_size = 8;
-
-	*physical_mode = 0;
-
-	/* For I/O accelerator mode we need to read physical device handles */
-	if (h->transMethod & CFGTBL_Trans_io_accel1 ||
-		h->transMethod & CFGTBL_Trans_io_accel2) {
-		*physical_mode = HPSA_REPORT_PHYS_EXTENDED;
-		physical_entry_size = 24;
-	}
-	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize,
-		*physical_mode)) {
+	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
 		return -1;
 	}
-	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
-		physical_entry_size;
+	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
-		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
-			" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
-			*nphysicals - HPSA_MAX_PHYS_LUN);
+		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
+			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
 		*nphysicals = HPSA_MAX_PHYS_LUN;
 	}
-	if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) {
+	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
 		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
 		return -1;
 	}
@@ -2900,6 +3017,33 @@ static int hpsa_hba_mode_enabled(struct ctlr_info *h)
 	return hba_mode_enabled;
 }
 
+/* get physical drive ioaccel handle and queue depth */
+static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
+		struct hpsa_scsi_dev_t *dev,
+		u8 *lunaddrbytes,
+		struct bmic_identify_physical_device *id_phys)
+{
+	int rc;
+	struct ext_report_lun_entry *rle =
+		(struct ext_report_lun_entry *) lunaddrbytes;
+
+	dev->ioaccel_handle = rle->ioaccel_handle;
+	memset(id_phys, 0, sizeof(*id_phys));
+	rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
+			GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
+			sizeof(*id_phys));
+	if (!rc)
+		/* Reserve space for FW operations */
+#define DRIVE_CMDS_RESERVED_FOR_FW 2
+#define DRIVE_QUEUE_DEPTH 7
+		dev->queue_depth =
+			le16_to_cpu(id_phys->current_queue_depth_limit) -
+				DRIVE_CMDS_RESERVED_FOR_FW;
+	else
+		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
+	atomic_set(&dev->ioaccel_cmds_out, 0);
+}
+
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 {
 	/* the idea here is we could get notified
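
The depth chosen per physical drive is the firmware-reported limit minus
a small reservation, with a conservative fallback when the BMIC query
fails.  The selection above reduces to (sketch, hypothetical helper):

    #include <stdint.h>

    #define DRIVE_CMDS_RESERVED_FOR_FW 2
    #define DRIVE_QUEUE_DEPTH 7     /* conservative fallback */

    static uint16_t drive_queue_depth(int bmic_ok, uint16_t fw_limit)
    {
            if (bmic_ok)
                    return fw_limit - DRIVE_CMDS_RESERVED_FOR_FW;
            return DRIVE_QUEUE_DEPTH;
    }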
@@ -2914,9 +3058,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 	 */
 	struct ReportExtendedLUNdata *physdev_list = NULL;
 	struct ReportLUNdata *logdev_list = NULL;
+	struct bmic_identify_physical_device *id_phys = NULL;
 	u32 nphysicals = 0;
 	u32 nlogicals = 0;
-	int physical_mode = 0;
 	u32 ndev_allocated = 0;
 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
 	int ncurrent = 0;
@@ -2929,8 +3073,10 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
 	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
 	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
+	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
 
-	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
+	if (!currentsd || !physdev_list || !logdev_list ||
+		!tmpdevice || !id_phys) {
 		dev_err(&h->pdev->dev, "out of memory\n");
 		goto out;
 	}
@@ -2947,10 +3093,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 
 	h->hba_mode_enabled = rescan_hba_mode;
 
-	if (hpsa_gather_lun_info(h,
-		sizeof(*physdev_list), sizeof(*logdev_list),
-		(struct ReportLUNdata *) physdev_list, &nphysicals,
-		&physical_mode, logdev_list, &nlogicals))
+	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
+			logdev_list, &nlogicals))
 		goto out;
 
 	/* We might see up to the maximum number of logical and physical disks
@@ -3047,10 +3191,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 			ncurrent++;
 			break;
 		}
-		if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
-			memcpy(&this_device->ioaccel_handle,
-				&lunaddrbytes[20],
-				sizeof(this_device->ioaccel_handle));
+		if (h->transMethod & CFGTBL_Trans_io_accel1 ||
+			h->transMethod & CFGTBL_Trans_io_accel2) {
+			hpsa_get_ioaccel_drive_info(h, this_device,
+						lunaddrbytes, id_phys);
+			atomic_set(&this_device->ioaccel_cmds_out, 0);
 			ncurrent++;
 		}
 		break;
@@ -3074,6 +3219,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 		if (ncurrent >= HPSA_MAX_DEVICES)
 			break;
 	}
+	hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent);
 	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
 out:
 	kfree(tmpdevice);
@@ -3082,6 +3228,7 @@ out:
 	kfree(currentsd);
 	kfree(physdev_list);
 	kfree(logdev_list);
+	kfree(id_phys);
 }
 
 /*
@@ -3197,7 +3344,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
 
 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
-	u8 *scsi3addr)
+	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
 {
 	struct scsi_cmnd *cmd = c->scsi_cmd;
 	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
@@ -3210,13 +3357,17 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
 	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
 
 	/* TODO: implement chaining support */
-	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
+		atomic_dec(&phys_disk->ioaccel_cmds_out);
 		return IO_ACCEL_INELIGIBLE;
+	}
 
 	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
 
-	if (fixup_ioaccel_cdb(cdb, &cdb_len))
+	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
+		atomic_dec(&phys_disk->ioaccel_cmds_out);
 		return IO_ACCEL_INELIGIBLE;
+	}
 
 	c->cmd_type = CMD_IOACCEL1;
 
@@ -3226,8 +3377,10 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
 	BUG_ON(c->busaddr & 0x0000007F);
 
 	use_sg = scsi_dma_map(cmd);
-	if (use_sg < 0)
+	if (use_sg < 0) {
+		atomic_dec(&phys_disk->ioaccel_cmds_out);
 		return use_sg;
+	}
 
 	if (use_sg) {
 		curr_sg = cp->SG;
@@ -3286,8 +3439,10 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
 	struct scsi_cmnd *cmd = c->scsi_cmd;
 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
 
+	c->phys_disk = dev;
+
 	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
-		cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
+		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
 }
 
 /*
@@ -3351,7 +3506,7 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h,
 
 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
-	u8 *scsi3addr)
+	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
 {
 	struct scsi_cmnd *cmd = c->scsi_cmd;
 	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
@@ -3362,11 +3517,16 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
 	u32 len;
 	u32 total_len = 0;
 
-	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
+		atomic_dec(&phys_disk->ioaccel_cmds_out);
 		return IO_ACCEL_INELIGIBLE;
+	}
 
-	if (fixup_ioaccel_cdb(cdb, &cdb_len))
+	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
+		atomic_dec(&phys_disk->ioaccel_cmds_out);
 		return IO_ACCEL_INELIGIBLE;
+	}
+
 	c->cmd_type = CMD_IOACCEL2;
 	/* Adjust the DMA address to point to the accelerated command buffer */
 	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
@@ -3377,8 +3537,10 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
 	cp->IU_type = IOACCEL2_IU_TYPE;
 
 	use_sg = scsi_dma_map(cmd);
-	if (use_sg < 0)
+	if (use_sg < 0) {
+		atomic_dec(&phys_disk->ioaccel_cmds_out);
 		return use_sg;
+	}
 
 	if (use_sg) {
 		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
@@ -3444,14 +3606,22 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
  */
 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
-	u8 *scsi3addr)
+	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
 {
+	/* Try to honor the device's queue depth */
+	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
+					phys_disk->queue_depth) {
+		atomic_dec(&phys_disk->ioaccel_cmds_out);
+		return IO_ACCEL_INELIGIBLE;
+	}
 	if (h->transMethod & CFGTBL_Trans_io_accel1)
 		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
-			cdb, cdb_len, scsi3addr);
+						cdb, cdb_len, scsi3addr,
+						phys_disk);
 	else
 		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
-			cdb, cdb_len, scsi3addr);
+						cdb, cdb_len, scsi3addr,
+						phys_disk);
 }
 
 static void raid_map_helper(struct raid_map_data *map,
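
The gate added above is the usual optimistic increment-then-check
pattern.  A userspace model with C11 atomics (the kernel code uses
atomic_inc_return()/atomic_dec(); function name here is illustrative):

    #include <stdatomic.h>

    /* Returns 0 if a command slot was reserved, -1 if the device is
     * already at its queue depth (caller falls back to the RAID path). */
    static int try_reserve_slot(atomic_int *cmds_out, int queue_depth)
    {
            if (atomic_fetch_add(cmds_out, 1) + 1 > queue_depth) {
                    atomic_fetch_sub(cmds_out, 1);  /* back out our count */
                    return -1;
            }
            return 0;
    }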
@@ -3755,6 +3925,8 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 		return IO_ACCEL_INELIGIBLE;
 	}
 
+	c->phys_disk = dev->phys_disk[map_index];
+
 	disk_handle = dd[map_index].ioaccel_handle;
 	disk_block = le64_to_cpu(map->disk_starting_blk) +
 		first_row * le16_to_cpu(map->strip_size) +
@@ -3802,7 +3974,8 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 		cdb_len = 10;
 	}
 	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
-		dev->scsi3addr);
+						dev->scsi3addr,
+						dev->phys_disk[map_index]);
 }
 
 /* Submit commands down the "normal" RAID stack path */
@@ -4016,15 +4189,17 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
 
 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
-	struct ctlr_info *h = sdev_to_hba(sdev);
+	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
+
+	if (!logical_drive)
+		return -ENODEV;
 
 	if (qdepth < 1)
 		qdepth = 1;
-	else
-		if (qdepth > h->nr_cmds)
-			qdepth = h->nr_cmds;
-	scsi_change_queue_depth(sdev, qdepth);
-	return sdev->queue_depth;
+	else if (qdepth > logical_drive->queue_depth)
+		qdepth = logical_drive->queue_depth;
+
+	return scsi_change_queue_depth(sdev, qdepth);
 }
 
 static int hpsa_scan_finished(struct Scsi_Host *sh,
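
The reworked hpsa_change_queue_depth() now clamps to the per-device
limit rather than the controller-wide one.  The clamp itself is just
(sketch):

    static int clamp_qdepth(int requested, int device_limit)
    {
            if (requested < 1)
                    return 1;
            return requested > device_limit ? device_limit : requested;
    }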
@@ -4068,10 +4243,7 @@ static int hpsa_register_scsi(struct ctlr_info *h)
 		HPSA_CMDS_RESERVED_FOR_ABORTS -
 		HPSA_CMDS_RESERVED_FOR_DRIVER -
 		HPSA_MAX_CONCURRENT_PASSTHRUS;
-	if (h->hba_mode_enabled)
-		sh->cmd_per_lun = 7;
-	else
-		sh->cmd_per_lun = sh->can_queue;
+	sh->cmd_per_lun = sh->can_queue;
 	sh->sg_tablesize = h->maxsgentries;
 	h->scsi_host = sh;
 	sh->hostdata[0] = (unsigned long) h;
@@ -5090,6 +5262,16 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 		c->Request.CDB[7] = (size >> 16) & 0xFF;
 		c->Request.CDB[8] = (size >> 8) & 0xFF;
 		break;
+	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+		c->Request.CDBLen = 10;
+		c->Request.type_attr_dir =
+			TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+		c->Request.Timeout = 0;
+		c->Request.CDB[0] = BMIC_READ;
+		c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
+		c->Request.CDB[7] = (size >> 16) & 0xFF;
+		c->Request.CDB[8] = (size >> 8) & 0XFF;
+		break;
 	default:
 		dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
 		BUG();
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index a0f4268df457..d0fb854195ee 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -46,6 +46,11 @@ struct hpsa_scsi_dev_t {
 	unsigned char model[16];        /* bytes 16-31 of inquiry data */
 	unsigned char raid_level;	/* from inquiry page 0xC1 */
 	unsigned char volume_offline;	/* discovered via TUR or VPD */
+	u16 queue_depth;		/* max queue_depth for this device */
+	atomic_t ioaccel_cmds_out;	/* Only used for physical devices
+					 * counts commands sent to physical
+					 * device via "ioaccel" path.
+					 */
 	u32 ioaccel_handle;
 	int offload_config;		/* I/O accel RAID offload configured */
 	int offload_enabled;		/* I/O accel RAID offload enabled */
@@ -54,6 +59,15 @@ struct hpsa_scsi_dev_t {
 	 */
 	struct raid_map_data raid_map;	/* I/O accelerator RAID map */
 
+	/*
+	 * Pointers from logical drive map indices to the phys drives that
+	 * make those logical drives.  Note, multiple logical drives may
+	 * share physical drives.  You can have for instance 5 physical
+	 * drives with 3 logical drives each using those same 5 physical
+	 * disks. We need these pointers for counting i/o's out to physical
+	 * devices in order to honor physical device queue depth limits.
+	 */
+	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
 };
 
 struct reply_queue_buffer {
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 3f2f0af6abb2..4726dbb67fa3 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -240,6 +240,10 @@ struct ReportLUNdata {
 
 struct ext_report_lun_entry {
 	u8 lunid[8];
+#define GET_BMIC_BUS(lunid)		((lunid)[7] & 0x3F)
+#define GET_BMIC_LEVEL_TWO_TARGET(lunid)	((lunid)[6])
+#define GET_BMIC_DRIVE_NUMBER(lunid)	(((GET_BMIC_BUS((lunid)) - 1) << 8) + \
+					GET_BMIC_LEVEL_TWO_TARGET((lunid)))
 	u8 wwid[8];
 	u8 device_type;
 	u8 device_flags;
@@ -268,6 +272,7 @@ struct SenseSubsystem_info {
 #define HPSA_CACHE_FLUSH 0x01	/* C2 was already being used by HPSA */
 #define BMIC_FLASH_FIRMWARE 0xF7
 #define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
+#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
 
 /* Command List Structure */
 union SCSI3Addr {
@@ -405,6 +410,17 @@ struct CommandList {
 	struct completion *waiting;
 	void   *scsi_cmd;
 	struct work_struct work;
+
+	/*
+	 * For commands using either of the two "ioaccel" paths to
+	 * bypass the RAID stack and go directly to the physical disk
+	 * phys_disk is a pointer to the hpsa_scsi_dev_t to which the
+	 * i/o is destined.  We need to store that here because the command
+	 * may potentially encounter TASK SET FULL and need to be resubmitted
+	 * For "normal" i/o's not using the "ioaccel" paths, phys_disk is
+	 * not used.
+	 */
+	struct hpsa_scsi_dev_t *phys_disk;
 } __aligned(COMMANDLIST_ALIGNMENT);
 
 /* Max S/G elements in I/O accelerator command */
@@ -641,5 +657,137 @@ struct hpsa_pci_info {
 	u32	board_id;
 };
 
+struct bmic_identify_physical_device {
+	u8    scsi_bus;          /* SCSI Bus number on controller */
+	u8    scsi_id;           /* SCSI ID on this bus */
+	__le16 block_size;       /* sector size in bytes */
+	__le32 total_blocks;     /* number for sectors on drive */
+	__le32 reserved_blocks;  /* controller reserved (RIS) */
+	u8    model[40];         /* Physical Drive Model */
+	u8    serial_number[40]; /* Drive Serial Number */
+	u8    firmware_revision[8]; /* drive firmware revision */
+	u8    scsi_inquiry_bits; /* inquiry byte 7 bits */
+	u8    compaq_drive_stamp; /* 0 means drive not stamped */
+	u8    last_failure_reason;
+#define BMIC_LAST_FAILURE_TOO_SMALL_IN_LOAD_CONFIG		0x01
+#define BMIC_LAST_FAILURE_ERROR_ERASING_RIS			0x02
+#define BMIC_LAST_FAILURE_ERROR_SAVING_RIS			0x03
+#define BMIC_LAST_FAILURE_FAIL_DRIVE_COMMAND			0x04
+#define BMIC_LAST_FAILURE_MARK_BAD_FAILED			0x05
+#define BMIC_LAST_FAILURE_MARK_BAD_FAILED_IN_FINISH_REMAP	0x06
+#define BMIC_LAST_FAILURE_TIMEOUT				0x07
+#define BMIC_LAST_FAILURE_AUTOSENSE_FAILED			0x08
+#define BMIC_LAST_FAILURE_MEDIUM_ERROR_1			0x09
+#define BMIC_LAST_FAILURE_MEDIUM_ERROR_2			0x0a
+#define BMIC_LAST_FAILURE_NOT_READY_BAD_SENSE			0x0b
+#define BMIC_LAST_FAILURE_NOT_READY				0x0c
+#define BMIC_LAST_FAILURE_HARDWARE_ERROR			0x0d
+#define BMIC_LAST_FAILURE_ABORTED_COMMAND			0x0e
+#define BMIC_LAST_FAILURE_WRITE_PROTECTED			0x0f
+#define BMIC_LAST_FAILURE_SPIN_UP_FAILURE_IN_RECOVER		0x10
+#define BMIC_LAST_FAILURE_REBUILD_WRITE_ERROR			0x11
+#define BMIC_LAST_FAILURE_TOO_SMALL_IN_HOT_PLUG			0x12
+#define BMIC_LAST_FAILURE_BUS_RESET_RECOVERY_ABORTED		0x13
+#define BMIC_LAST_FAILURE_REMOVED_IN_HOT_PLUG			0x14
+#define BMIC_LAST_FAILURE_INIT_REQUEST_SENSE_FAILED		0x15
+#define BMIC_LAST_FAILURE_INIT_START_UNIT_FAILED		0x16
+#define BMIC_LAST_FAILURE_INQUIRY_FAILED			0x17
+#define BMIC_LAST_FAILURE_NON_DISK_DEVICE			0x18
+#define BMIC_LAST_FAILURE_READ_CAPACITY_FAILED			0x19
+#define BMIC_LAST_FAILURE_INVALID_BLOCK_SIZE			0x1a
+#define BMIC_LAST_FAILURE_HOT_PLUG_REQUEST_SENSE_FAILED		0x1b
+#define BMIC_LAST_FAILURE_HOT_PLUG_START_UNIT_FAILED		0x1c
+#define BMIC_LAST_FAILURE_WRITE_ERROR_AFTER_REMAP		0x1d
+#define BMIC_LAST_FAILURE_INIT_RESET_RECOVERY_ABORTED		0x1e
+#define BMIC_LAST_FAILURE_DEFERRED_WRITE_ERROR			0x1f
+#define BMIC_LAST_FAILURE_MISSING_IN_SAVE_RIS			0x20
+#define BMIC_LAST_FAILURE_WRONG_REPLACE				0x21
+#define BMIC_LAST_FAILURE_GDP_VPD_INQUIRY_FAILED		0x22
+#define BMIC_LAST_FAILURE_GDP_MODE_SENSE_FAILED			0x23
+#define BMIC_LAST_FAILURE_DRIVE_NOT_IN_48BIT_MODE		0x24
+#define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_HOT_PLUG		0x25
+#define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_LOAD_CFG		0x26
+#define BMIC_LAST_FAILURE_PROTOCOL_ADAPTER_FAILED		0x27
+#define BMIC_LAST_FAILURE_FAULTY_ID_BAY_EMPTY			0x28
+#define BMIC_LAST_FAILURE_FAULTY_ID_BAY_OCCUPIED		0x29
+#define BMIC_LAST_FAILURE_FAULTY_ID_INVALID_BAY			0x2a
+#define BMIC_LAST_FAILURE_WRITE_RETRIES_FAILED			0x2b
+
+#define BMIC_LAST_FAILURE_SMART_ERROR_REPORTED			0x37
+#define BMIC_LAST_FAILURE_PHY_RESET_FAILED			0x38
+#define BMIC_LAST_FAILURE_ONLY_ONE_CTLR_CAN_SEE_DRIVE		0x40
+#define BMIC_LAST_FAILURE_KC_VOLUME_FAILED			0x41
+#define BMIC_LAST_FAILURE_UNEXPECTED_REPLACEMENT		0x42
+#define BMIC_LAST_FAILURE_OFFLINE_ERASE				0x80
+#define BMIC_LAST_FAILURE_OFFLINE_TOO_SMALL			0x81
+#define BMIC_LAST_FAILURE_OFFLINE_DRIVE_TYPE_MIX		0x82
+#define BMIC_LAST_FAILURE_OFFLINE_ERASE_COMPLETE		0x83
+
+	u8     flags;
+	u8     more_flags;
+	u8     scsi_lun;          /* SCSI LUN for phys drive */
+	u8     yet_more_flags;
+	u8     even_more_flags;
+	__le32   spi_speed_rules;/* SPI Speed data:Ultra disable diagnose */
+	u8     phys_connector[2];         /* connector number on controller */
+	u8     phys_box_on_bus;  /* phys enclosure this drive resides */
+	u8     phys_bay_in_box;  /* phys drv bay this drive resides */
+	__le32   rpm;            /* Drive rotational speed in rpm */
+	u8     device_type;       /* type of drive */
+	u8     sata_version;     /* only valid when drive_type is SATA */
+	__le64   big_total_block_count;
+	__le64   ris_starting_lba;
+	__le32   ris_size;
+	u8     wwid[20];
+	u8     controller_phy_map[32];
+	__le16   phy_count;
+	u8     phy_connected_dev_type[256];
+	u8     phy_to_drive_bay_num[256];
+	__le16   phy_to_attached_dev_index[256];
+	u8     box_index;
+	u8     reserved;
+	__le16   extra_physical_drive_flags;
+#define BMIC_PHYS_DRIVE_SUPPORTS_GAS_GAUGE(idphydrv) \
+	(idphydrv->extra_physical_drive_flags & (1 << 10))
+	u8     negotiated_link_rate[256];
+	u8     phy_to_phy_map[256];
+	u8     redundant_path_present_map;
+	u8     redundant_path_failure_map;
+	u8     active_path_number;
+	__le16   alternate_paths_phys_connector[8];
+	u8     alternate_paths_phys_box_on_port[8];
+	u8     multi_lun_device_lun_count;
+	u8     minimum_good_fw_revision[8];
+	u8     unique_inquiry_bytes[20];
+	u8     current_temperature_degreesC;
+	u8     temperature_threshold_degreesC;
+	u8     max_temperature_degreesC;
+	u8     logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */
+	__le16   current_queue_depth_limit;
+	u8     switch_name[10];
+	__le16   switch_port;
+	u8     alternate_paths_switch_name[40];
+	u8     alternate_paths_switch_port[8];
+	__le16   power_on_hours; /* valid only if gas gauge supported */
+	__le16   percent_endurance_used; /* valid only if gas gauge supported. */
+#define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \
+	((idphydrv->percent_endurance_used & 0x80) || \
+	 (idphydrv->percent_endurance_used > 10000))
+	u8     drive_authentication;
+#define BMIC_PHYS_DRIVE_AUTHENTICATED(idphydrv) \
+	(idphydrv->drive_authentication == 0x80)
+	u8     smart_carrier_authentication;
+#define BMIC_SMART_CARRIER_AUTHENTICATION_SUPPORTED(idphydrv) \
+	(idphydrv->smart_carrier_authentication != 0x0)
+#define BMIC_SMART_CARRIER_AUTHENTICATED(idphydrv) \
+	(idphydrv->smart_carrier_authentication == 0x01)
+	u8     smart_carrier_app_fw_version;
+	u8     smart_carrier_bootloader_fw_version;
+	u8     encryption_key_name[64];
+	__le32 misc_drive_flags;
+	__le16 dek_index;
+	u8     padding[112];
+};
+
 #pragma pack()
 #endif /* HPSA_CMD_H */
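
Two fields of the identify buffer matter to the driver:
current_queue_depth_limit feeds the sizing above, and
logical_blocks_per_phys_block_exp encodes the physical block size as
512 * 2^exp.  Decoding the latter is simply (sketch, illustrative name):

    #include <stdint.h>

    /* Physical block size from the exponent field in the struct above. */
    static uint32_t phys_block_size(uint8_t exp)
    {
            return 512u << exp;     /* 0 -> 512, 3 -> 4096 */
    }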