 drivers/scsi/hpsa.c     | 275 ++++++++++++++++++-----------------------
 drivers/scsi/hpsa_cmd.h | 172 ++++++++++++-----------------
 2 files changed, 198 insertions(+), 249 deletions(-)
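
This patch annotates hpsa's controller-visible structures for endianness so sparse can check them: on-the-wire fields in hpsa_cmd.h become __le16/__le32/__le64, and hpsa.c gains the matching cpu_to_le*()/le*_to_cpu() conversions at each point of use. As a minimal sketch of the pattern (the struct and function names here are invented for illustration, not taken from the driver):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative only: a command block whose fields the controller
 * defines as little-endian regardless of the host CPU byte order. */
struct demo_cmd {
	__le16 sg_total;
	__le32 transfer_len;
};

static void demo_fill(struct demo_cmd *c, u16 nsg, u32 len)
{
	/* Convert host-order values before the device sees them; sparse
	 * warns if a plain integer is stored into a __le* field. */
	c->sg_total = cpu_to_le16(nsg);
	c->transfer_len = cpu_to_le32(len);
}

static u32 demo_len(const struct demo_cmd *c)
{
	/* ...and convert back before doing host-side arithmetic. */
	return le32_to_cpu(c->transfer_len);
}
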
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6bb4611b238a..2b9baea27a58 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -50,6 +50,7 @@
 #include <linux/jiffies.h>
 #include <linux/percpu-defs.h>
 #include <linux/percpu.h>
+#include <asm/unaligned.h>
 #include <asm/div64.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
@@ -229,7 +230,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
 	struct CommandList *c);
 /* performant mode helper functions */
 static void calc_bucket_map(int *bucket, int num_buckets,
-	int nsgs, int min_blocks, int *bucket_map);
+	int nsgs, int min_blocks, u32 *bucket_map);
 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
 static inline u32 next_command(struct ctlr_info *h, u8 q);
 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
@@ -919,7 +920,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
 
 	/* If this device a non-zero lun of a multi-lun device
 	 * byte 4 of the 8-byte LUN addr will contain the logical
-	 * unit no, zero otherise.
+	 * unit no, zero otherwise.
 	 */
 	if (device->scsi3addr[4] == 0) {
 		/* This is not a non-zero lun of a multi-lun device */
@@ -1504,7 +1505,7 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
 	chain_block = h->cmd_sg_list[c->cmdindex];
 	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
 	chain_len = sizeof(*chain_sg) *
-		(c->Header.SGTotal - h->max_cmd_sg_entries);
+		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
 	chain_sg->Len = cpu_to_le32(chain_len);
 	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
 				PCI_DMA_TODEVICE);
@@ -1693,7 +1694,7 @@ static void complete_scsi_command(struct CommandList *cp)
 
 	scsi_dma_unmap(cmd); /* undo the DMA mappings */
 	if ((cp->cmd_type == CMD_SCSI) &&
-		(cp->Header.SGTotal > h->max_cmd_sg_entries))
+		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
 		hpsa_unmap_sg_chain_block(h, cp);
 
 	cmd->result = (DID_OK << 16); 		/* host byte */
@@ -1726,8 +1727,10 @@ static void complete_scsi_command(struct CommandList *cp)
 	 */
 	if (cp->cmd_type == CMD_IOACCEL1) {
 		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
-		cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
-		cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
+		cp->Header.SGList = scsi_sg_count(cmd);
+		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
+		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
+					IOACCEL1_IOFLAGS_CDBLEN_MASK;
 		cp->Header.tag = c->tag;
 		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
 		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
@@ -2191,15 +2194,13 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
 			le16_to_cpu(map_buff->row_cnt));
 	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
 			le16_to_cpu(map_buff->layout_map_count));
-	dev_info(&h->pdev->dev, "flags = %u\n",
+	dev_info(&h->pdev->dev, "flags = 0x%x\n",
 			le16_to_cpu(map_buff->flags));
-	if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
-		dev_info(&h->pdev->dev, "encrypytion = ON\n");
-	else
-		dev_info(&h->pdev->dev, "encrypytion = OFF\n");
+	dev_info(&h->pdev->dev, "encrypytion = %s\n",
+			le16_to_cpu(map_buff->flags) &
+			RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
 	dev_info(&h->pdev->dev, "dekindex = %u\n",
 			le16_to_cpu(map_buff->dekindex));
-
 	map_cnt = le16_to_cpu(map_buff->layout_map_count);
 	for (map = 0; map < map_cnt; map++) {
 		dev_info(&h->pdev->dev, "Map%u:\n", map);
@@ -2741,8 +2742,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
 	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
 	struct hpsa_scsi_dev_t *d; /* device of request being aborted */
 	struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
-	u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
-	u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+	__le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+	__le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
 
 	if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
 		return 0; /* no match */
@@ -2761,8 +2762,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
 		return 0; /* no match */
 
 	it_nexus = cpu_to_le32(d->ioaccel_handle);
-	scsi_nexus = cpu_to_le32(c2a->scsi_nexus);
-	find = c2a->scsi_nexus;
+	scsi_nexus = c2a->scsi_nexus;
+	find = le32_to_cpu(c2a->scsi_nexus);
 
 	if (h->raid_offload_debug > 0)
 		dev_info(&h->pdev->dev,
@@ -3284,11 +3285,11 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
 
 	c->Header.SGList = use_sg;
 	/* Fill out the command structure to submit */
-	cp->dev_handle = ioaccel_handle & 0xFFFF;
-	cp->transfer_len = total_len;
-	cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
-			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
-	cp->control = control;
+	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
+	cp->transfer_len = cpu_to_le32(total_len);
+	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
+			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
+	cp->control = cpu_to_le32(control);
 	memcpy(cp->CDB, cdb, cdb_len);
 	memcpy(cp->CISS_LUN, scsi3addr, 8);
 	/* Tag was already set at init time. */
@@ -3324,7 +3325,7 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h,
 	BUG_ON(!(dev->offload_config && dev->offload_enabled));
 
 	/* Are we doing encryption on this device */
-	if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
+	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
 		return;
 	/* Set the data encryption key index. */
 	cp->dekindex = map->dekindex;
@@ -3340,96 +3341,33 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h,
 	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
 	case WRITE_6:
 	case READ_6:
-		if (map->volume_blk_size == 512) {
-			cp->tweak_lower =
-				(((u32) cmd->cmnd[2]) << 8) |
-					cmd->cmnd[3];
-			cp->tweak_upper = 0;
-		} else {
-			first_block =
-				(((u64) cmd->cmnd[2]) << 8) |
-					cmd->cmnd[3];
-			first_block = (first_block * map->volume_blk_size)/512;
-			cp->tweak_lower = (u32)first_block;
-			cp->tweak_upper = (u32)(first_block >> 32);
-		}
+		first_block = get_unaligned_be16(&cmd->cmnd[2]);
 		break;
 	case WRITE_10:
 	case READ_10:
-		if (map->volume_blk_size == 512) {
-			cp->tweak_lower =
-				(((u32) cmd->cmnd[2]) << 24) |
-				(((u32) cmd->cmnd[3]) << 16) |
-				(((u32) cmd->cmnd[4]) << 8) |
-					cmd->cmnd[5];
-			cp->tweak_upper = 0;
-		} else {
-			first_block =
-				(((u64) cmd->cmnd[2]) << 24) |
-				(((u64) cmd->cmnd[3]) << 16) |
-				(((u64) cmd->cmnd[4]) << 8) |
-					cmd->cmnd[5];
-			first_block = (first_block * map->volume_blk_size)/512;
-			cp->tweak_lower = (u32)first_block;
-			cp->tweak_upper = (u32)(first_block >> 32);
-		}
-		break;
 	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
 	case WRITE_12:
 	case READ_12:
-		if (map->volume_blk_size == 512) {
-			cp->tweak_lower =
-				(((u32) cmd->cmnd[2]) << 24) |
-				(((u32) cmd->cmnd[3]) << 16) |
-				(((u32) cmd->cmnd[4]) << 8) |
-					cmd->cmnd[5];
-			cp->tweak_upper = 0;
-		} else {
-			first_block =
-				(((u64) cmd->cmnd[2]) << 24) |
-				(((u64) cmd->cmnd[3]) << 16) |
-				(((u64) cmd->cmnd[4]) << 8) |
-					cmd->cmnd[5];
-			first_block = (first_block * map->volume_blk_size)/512;
-			cp->tweak_lower = (u32)first_block;
-			cp->tweak_upper = (u32)(first_block >> 32);
-		}
+		first_block = get_unaligned_be32(&cmd->cmnd[2]);
 		break;
 	case WRITE_16:
 	case READ_16:
-		if (map->volume_blk_size == 512) {
-			cp->tweak_lower =
-				(((u32) cmd->cmnd[6]) << 24) |
-				(((u32) cmd->cmnd[7]) << 16) |
-				(((u32) cmd->cmnd[8]) << 8) |
-					cmd->cmnd[9];
-			cp->tweak_upper =
-				(((u32) cmd->cmnd[2]) << 24) |
-				(((u32) cmd->cmnd[3]) << 16) |
-				(((u32) cmd->cmnd[4]) << 8) |
-					cmd->cmnd[5];
-		} else {
-			first_block =
-				(((u64) cmd->cmnd[2]) << 56) |
-				(((u64) cmd->cmnd[3]) << 48) |
-				(((u64) cmd->cmnd[4]) << 40) |
-				(((u64) cmd->cmnd[5]) << 32) |
-				(((u64) cmd->cmnd[6]) << 24) |
-				(((u64) cmd->cmnd[7]) << 16) |
-				(((u64) cmd->cmnd[8]) << 8) |
-					cmd->cmnd[9];
-			first_block = (first_block * map->volume_blk_size)/512;
-			cp->tweak_lower = (u32)first_block;
-			cp->tweak_upper = (u32)(first_block >> 32);
-		}
+		first_block = get_unaligned_be64(&cmd->cmnd[2]);
 		break;
 	default:
 		dev_err(&h->pdev->dev,
-			"ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
-			__func__);
+			"ERROR: %s: size (0x%x) not supported for encryption\n",
+			__func__, cmd->cmnd[0]);
 		BUG();
 		break;
 	}
+
+	if (le32_to_cpu(map->volume_blk_size) != 512)
+		first_block = first_block *
+				le32_to_cpu(map->volume_blk_size)/512;
+
+	cp->tweak_lower = cpu_to_le32(first_block);
+	cp->tweak_upper = cpu_to_le32(first_block >> 32);
 }
 
 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
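
The rewritten set_encrypt_ioaccel2() above leans on the get_unaligned_be16/32/64() helpers from <asm/unaligned.h>: a SCSI CDB stores the starting LBA as a big-endian byte string at a fixed offset, so each helper replaces one of the removed shift-and-or chains. A hedged sketch of the equivalence for a 10-byte CDB (the helper name is invented for illustration):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Sketch: bytes 2..5 of a READ_10/WRITE_10 CDB hold the LBA, most
 * significant byte first; get_unaligned_be32() reads exactly that. */
static u64 demo_cdb10_lba(const u8 *cdb)
{
	return get_unaligned_be32(&cdb[2]);
}

With first_block extracted uniformly, the volume-block-size scaling and the tweak_lower/tweak_upper split can then live once after the switch, as the hunk above shows.
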
@@ -3506,9 +3444,9 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
 	/* Set encryption parameters, if necessary */
 	set_encrypt_ioaccel2(h, c, cp);
 
-	cp->scsi_nexus = ioaccel_handle;
-	cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
-				DIRECT_LOOKUP_BIT;
+	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
+	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT |
+				DIRECT_LOOKUP_BIT);
 	memcpy(cp->cdb, cdb, sizeof(cp->cdb));
 
 	/* fill in sg elements */
@@ -3543,21 +3481,22 @@ static void raid_map_helper(struct raid_map_data *map,
 {
 	if (offload_to_mirror == 0)  {
 		/* use physical disk in the first mirrored group. */
-		*map_index %= map->data_disks_per_row;
+		*map_index %= le16_to_cpu(map->data_disks_per_row);
 		return;
 	}
 	do {
 		/* determine mirror group that *map_index indicates */
-		*current_group = *map_index / map->data_disks_per_row;
+		*current_group = *map_index /
+			le16_to_cpu(map->data_disks_per_row);
 		if (offload_to_mirror == *current_group)
 			continue;
-		if (*current_group < (map->layout_map_count - 1)) {
+		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
 			/* select map index from next group */
-			*map_index += map->data_disks_per_row;
+			*map_index += le16_to_cpu(map->data_disks_per_row);
 			(*current_group)++;
 		} else {
 			/* select map index from first group */
-			*map_index %= map->data_disks_per_row;
+			*map_index %= le16_to_cpu(map->data_disks_per_row);
 			*current_group = 0;
 		}
 	} while (offload_to_mirror != *current_group);
@@ -3595,6 +3534,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 	u32 disk_block_cnt;
 	u8 cdb[16];
 	u8 cdb_len;
+	u16 strip_size;
 #if BITS_PER_LONG == 32
 	u64 tmpdiv;
 #endif
@@ -3668,11 +3608,14 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 		return IO_ACCEL_INELIGIBLE;
 
 	/* check for invalid block or wraparound */
-	if (last_block >= map->volume_blk_cnt || last_block < first_block)
+	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
+		last_block < first_block)
 		return IO_ACCEL_INELIGIBLE;
 
 	/* calculate stripe information for the request */
-	blocks_per_row = map->data_disks_per_row * map->strip_size;
+	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
+				le16_to_cpu(map->strip_size);
+	strip_size = le16_to_cpu(map->strip_size);
 #if BITS_PER_LONG == 32
 	tmpdiv = first_block;
 	(void) do_div(tmpdiv, blocks_per_row);
@@ -3683,18 +3626,18 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
 	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
 	tmpdiv = first_row_offset;
-	(void) do_div(tmpdiv, map->strip_size);
+	(void) do_div(tmpdiv, strip_size);
 	first_column = tmpdiv;
 	tmpdiv = last_row_offset;
-	(void) do_div(tmpdiv, map->strip_size);
+	(void) do_div(tmpdiv, strip_size);
 	last_column = tmpdiv;
 #else
 	first_row = first_block / blocks_per_row;
 	last_row = last_block / blocks_per_row;
 	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
 	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
-	first_column = first_row_offset / map->strip_size;
-	last_column = last_row_offset / map->strip_size;
+	first_column = first_row_offset / strip_size;
+	last_column = last_row_offset / strip_size;
 #endif
 
 	/* if this isn't a single row/column then give to the controller */
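
The new strip_size local above is not just style: on 32-bit builds the row offsets go through do_div(), and caching le16_to_cpu(map->strip_size) once keeps the byte-swap out of every division. Roughly, under an invented helper name:

#include <linux/types.h>
#include <asm/div64.h>

/* Sketch: do_div(n, base) divides the 64-bit n in place and returns
 * the remainder, so the quotient is what remains in n afterwards. */
static u32 demo_column(u64 row_offset, u16 strip_size)
{
	(void) do_div(row_offset, strip_size);	/* row_offset /= strip_size */
	return (u32)row_offset;
}
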
@@ -3702,10 +3645,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 		return IO_ACCEL_INELIGIBLE;
 
 	/* proceeding with driver mapping */
-	total_disks_per_row = map->data_disks_per_row +
-				map->metadata_disks_per_row;
+	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
+				le16_to_cpu(map->metadata_disks_per_row);
 	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
-				map->row_cnt;
+				le16_to_cpu(map->row_cnt);
 	map_index = (map_row * total_disks_per_row) + first_column;
 
 	switch (dev->raid_level) {
@@ -3716,23 +3659,24 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 		 * (2-drive R1 and R10 with even # of drives.)
 		 * Appropriate for SSDs, not optimal for HDDs
 		 */
-		BUG_ON(map->layout_map_count != 2);
+		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
 		if (dev->offload_to_mirror)
-			map_index += map->data_disks_per_row;
+			map_index += le16_to_cpu(map->data_disks_per_row);
 		dev->offload_to_mirror = !dev->offload_to_mirror;
 		break;
 	case HPSA_RAID_ADM:
 		/* Handles N-way mirrors  (R1-ADM)
 		 * and R10 with # of drives divisible by 3.)
 		 */
-		BUG_ON(map->layout_map_count != 3);
+		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
 
 		offload_to_mirror = dev->offload_to_mirror;
 		raid_map_helper(map, offload_to_mirror,
 				&map_index, &current_group);
 		/* set mirror group to use next time */
 		offload_to_mirror =
-			(offload_to_mirror >= map->layout_map_count - 1)
+			(offload_to_mirror >=
+			le16_to_cpu(map->layout_map_count) - 1)
 			? 0 : offload_to_mirror + 1;
 		dev->offload_to_mirror = offload_to_mirror;
 		/* Avoid direct use of dev->offload_to_mirror within this
@@ -3742,14 +3686,16 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 		break;
 	case HPSA_RAID_5:
 	case HPSA_RAID_6:
-		if (map->layout_map_count <= 1)
+		if (le16_to_cpu(map->layout_map_count) <= 1)
 			break;
 
 		/* Verify first and last block are in same RAID group */
 		r5or6_blocks_per_row =
-			map->strip_size * map->data_disks_per_row;
+			le16_to_cpu(map->strip_size) *
+			le16_to_cpu(map->data_disks_per_row);
 		BUG_ON(r5or6_blocks_per_row == 0);
-		stripesize = r5or6_blocks_per_row * map->layout_map_count;
+		stripesize = r5or6_blocks_per_row *
+			le16_to_cpu(map->layout_map_count);
 #if BITS_PER_LONG == 32
 		tmpdiv = first_block;
 		first_group = do_div(tmpdiv, stripesize);
@@ -3812,19 +3758,19 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 				r5or6_blocks_per_row);
 
 		first_column = r5or6_first_column =
-			r5or6_first_row_offset / map->strip_size;
+			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
 		r5or6_last_column =
-			r5or6_last_row_offset / map->strip_size;
+			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
 #endif
 		if (r5or6_first_column != r5or6_last_column)
 			return IO_ACCEL_INELIGIBLE;
 
 		/* Request is eligible */
 		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
-			map->row_cnt;
+			le16_to_cpu(map->row_cnt);
 
 		map_index = (first_group *
-			(map->row_cnt * total_disks_per_row)) +
+			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
 			(map_row * total_disks_per_row) + first_column;
 		break;
 	default:
@@ -3832,8 +3778,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 	}
 
 	disk_handle = dd[map_index].ioaccel_handle;
-	disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
-			(first_row_offset - (first_column * map->strip_size));
+	disk_block = le64_to_cpu(map->disk_starting_blk) +
+			first_row * le16_to_cpu(map->strip_size) +
+			(first_row_offset - first_column *
+			le16_to_cpu(map->strip_size));
 	disk_block_cnt = block_cnt;
 
 	/* handle differing logical/physical block sizes */
@@ -4227,13 +4175,15 @@ static void swizzle_abort_tag(u8 *tag)
 }
 
 static void hpsa_get_tag(struct ctlr_info *h,
-	struct CommandList *c, u32 *taglower, u32 *tagupper)
+	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
 {
+	u64 tag;
 	if (c->cmd_type == CMD_IOACCEL1) {
 		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
 			&h->ioaccel_cmd_pool[c->cmdindex];
-		*tagupper = (u32) (cm1->tag >> 32);
-		*taglower = (u32) (cm1->tag & 0x0ffffffffULL);
+		tag = le64_to_cpu(cm1->tag);
+		*tagupper = cpu_to_le32(tag >> 32);
+		*taglower = cpu_to_le32(tag);
 		return;
 	}
 	if (c->cmd_type == CMD_IOACCEL2) {
@@ -4244,8 +4194,9 @@ static void hpsa_get_tag(struct ctlr_info *h,
 		*taglower = cm2->Tag;
 		return;
 	}
-	*tagupper = (u32) (c->Header.tag >> 32);
-	*taglower = (u32) (c->Header.tag & 0x0ffffffffULL);
+	tag = le64_to_cpu(c->Header.tag);
+	*tagupper = cpu_to_le32(tag >> 32);
+	*taglower = cpu_to_le32(tag);
 }
 
 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
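
hpsa_get_tag() now reads the 64-bit tag through le64_to_cpu() once and re-encodes the two 32-bit halves, and its out-parameters become __le32 to match how the abort paths store and print them. The same split in isolation (function name invented for the sketch):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: split a little-endian 64-bit tag into little-endian halves. */
static void demo_split_tag(__le64 letag, __le32 *lower, __le32 *upper)
{
	u64 tag = le64_to_cpu(letag);

	*lower = cpu_to_le32(tag);
	*upper = cpu_to_le32(tag >> 32);
}
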
@@ -4254,7 +4205,7 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
 	int rc = IO_OK;
 	struct CommandList *c;
 	struct ErrorInfo *ei;
-	u32 tagupper, taglower;
+	__le32 tagupper, taglower;
 
 	c = cmd_special_alloc(h);
 	if (c == NULL) {	/* trouble... */
@@ -4479,7 +4430,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
 	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
 	char msg[256];		/* For debug messaging. */
 	int ml = 0;
-	u32 tagupper, taglower;
+	__le32 tagupper, taglower;
 
 	/* Find the controller of the command to be aborted */
 	h = sdev_to_hba(sc->device);
@@ -4884,7 +4835,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	}
 	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
 	/* use the kernel address the cmd block for tag */
-	c->Header.tag = c->busaddr;
+	c->Header.tag = cpu_to_le64(c->busaddr);
 
 	/* Fill in Request block */
 	memcpy(&c->Request, &iocommand.Request,
@@ -4940,7 +4891,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	u64 temp64;
 	BYTE sg_used = 0;
 	int status = 0;
-	int i;
 	u32 left;
 	u32 sz;
 	BYTE __user *data_ptr;
@@ -5014,7 +4964,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	c->Header.SGList = (u8) sg_used;
 	c->Header.SGTotal = cpu_to_le16(sg_used);
 	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
-	c->Header.tag = c->busaddr;
+	c->Header.tag = cpu_to_le64(c->busaddr);
 	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
 	if (ioc->buf_size > 0) {
 		int i;
@@ -5047,6 +4997,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		goto cleanup0;
 	}
 	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
+		int i;
+
 		/* Copy the data out of the buffer we created */
 		BYTE __user *ptr = ioc->buf;
 		for (i = 0; i < sg_used; i++) {
@@ -5062,6 +5014,8 @@ cleanup0:
 	cmd_special_free(h, c);
 cleanup1:
 	if (buff) {
+		int i;
+
 		for (i = 0; i < sg_used; i++)
 			kfree(buff[i]);
 		kfree(buff);
@@ -5173,7 +5127,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 {
 	int pci_dir = XFER_NONE;
 	struct CommandList *a; /* for commands to be aborted */
-	u32 tupper, tlower;
 
 	c->cmd_type = CMD_IOCTL_PEND;
 	c->Header.ReplyQueue = 0;
@@ -5184,7 +5137,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 		c->Header.SGList = 0;
 		c->Header.SGTotal = cpu_to_le16(0);
 	}
-	c->Header.tag = c->busaddr;
+	c->Header.tag = cpu_to_le64(c->busaddr);
 	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
 
 	if (cmd_type == TYPE_CMD) {
@@ -5281,10 +5234,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 			break;
 		case  HPSA_ABORT_MSG:
 			a = buff;       /* point to command to be aborted */
-			dev_dbg(&h->pdev->dev, "Abort Tag:0x%016llx using request Tag:0x%016llx",
+			dev_dbg(&h->pdev->dev,
+				"Abort Tag:0x%016llx request Tag:0x%016llx",
 				a->Header.tag, c->Header.tag);
-			tlower = (u32) (a->Header.tag >> 32);
-			tupper = (u32) (a->Header.tag & 0x0ffffffffULL);
 			c->Request.CDBLen = 16;
 			c->Request.type_attr_dir =
 					TYPE_ATTR_DIR(cmd_type,
@@ -5295,14 +5247,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 			c->Request.CDB[2] = 0x00; /* reserved */
 			c->Request.CDB[3] = 0x00; /* reserved */
 			/* Tag to abort goes in CDB[4]-CDB[11] */
-			c->Request.CDB[4] = tlower & 0xFF;
-			c->Request.CDB[5] = (tlower >> 8) & 0xFF;
-			c->Request.CDB[6] = (tlower >> 16) & 0xFF;
-			c->Request.CDB[7] = (tlower >> 24) & 0xFF;
-			c->Request.CDB[8] = tupper & 0xFF;
-			c->Request.CDB[9] = (tupper >> 8) & 0xFF;
-			c->Request.CDB[10] = (tupper >> 16) & 0xFF;
-			c->Request.CDB[11] = (tupper >> 24) & 0xFF;
+			memcpy(&c->Request.CDB[4], &a->Header.tag,
+				sizeof(a->Header.tag));
 			c->Request.CDB[12] = 0x00; /* reserved */
 			c->Request.CDB[13] = 0x00; /* reserved */
 			c->Request.CDB[14] = 0x00; /* reserved */
@@ -5633,7 +5579,8 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 	static const size_t cmd_sz = sizeof(*cmd) +
 			sizeof(cmd->ErrorDescriptor);
 	dma_addr_t paddr64;
-	uint32_t paddr32, tag;
+	__le32 paddr32;
+	u32 tag;
 	void __iomem *vaddr;
 	int i, err;
 
@@ -5661,12 +5608,12 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 	 * although there's no guarantee, we assume that the address is at
 	 * least 4-byte aligned (most likely, it's page-aligned).
 	 */
-	paddr32 = paddr64;
+	paddr32 = cpu_to_le32(paddr64);
 
 	cmd->CommandHeader.ReplyQueue = 0;
 	cmd->CommandHeader.SGList = 0;
 	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
-	cmd->CommandHeader.tag = paddr32;
+	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
 	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
 
 	cmd->Request.CDBLen = 16;
@@ -5677,14 +5624,14 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 	cmd->Request.CDB[1] = type;
 	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
 	cmd->ErrorDescriptor.Addr =
-			cpu_to_le64((paddr32 + sizeof(*cmd)));
+			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
 	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
 
-	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
+	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
 
 	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
 		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
-		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
+		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
 			break;
 		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
 	}
@@ -5718,8 +5665,6 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
 	void __iomem *vaddr, u32 use_doorbell)
 {
-	u16 pmcsr;
-	int pos;
 
 	if (use_doorbell) {
 		/* For everything after the P600, the PCI power state method
@@ -5744,6 +5689,8 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
 		 * the controller, place the interface device in D3 then to D0,
 		 * this causes a secondary PCI reset which will reset the
 		 * controller." */
+		int pos;
+		u16 pmcsr;
 
 		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
 		if (pos == 0) {
@@ -5754,16 +5701,17 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
 		}
 		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
 		/* enter the D3hot power management state */
-		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+		pci_read_config_word(pdev, pos + PCI_PM_CTRL,
+					(__force u16 *)&pmcsr);
 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
-		pmcsr |= PCI_D3hot;
+		pmcsr |= (__force u16) PCI_D3hot;
 		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
 
 		msleep(500);
 
 		/* enter the D0 power management state */
 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
-		pmcsr |= PCI_D0;
+		pmcsr |= (__force u16) PCI_D0;
 		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
 
 		/*
@@ -7172,7 +7120,7 @@ static struct pci_driver hpsa_pci_driver = {
  * bits of the command address.
  */
 static void calc_bucket_map(int bucket[], int num_buckets,
-	int nsgs, int min_blocks, int *bucket_map)
+	int nsgs, int min_blocks, u32 *bucket_map)
 {
 	int i, j, b, size;
 
@@ -7328,7 +7276,8 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 				(i * sizeof(struct ErrorInfo)));
 			cp->err_info_len = sizeof(struct ErrorInfo);
 			cp->sgl_offset = IOACCEL1_SGLOFFSET;
-			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
+			cp->host_context_flags =
+				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
 			cp->timeout_sec = 0;
 			cp->ReplyQueue = 0;
 			cp->tag =
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index cb988c41cad9..9bae50cb43a2 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -206,27 +206,27 @@ struct raid_map_disk_data {
 };
 
 struct raid_map_data {
-	u32	structure_size;		/* Size of entire structure in bytes */
-	u32	volume_blk_size;	/* bytes / block in the volume */
-	u64	volume_blk_cnt;		/* logical blocks on the volume */
+	__le32	structure_size;		/* Size of entire structure in bytes */
+	__le32	volume_blk_size;	/* bytes / block in the volume */
+	__le64	volume_blk_cnt;		/* logical blocks on the volume */
 	u8	phys_blk_shift;		/* Shift factor to convert between
 					 * units of logical blocks and physical
 					 * disk blocks */
 	u8	parity_rotation_shift;	/* Shift factor to convert between units
 					 * of logical stripes and physical
 					 * stripes */
-	u16	strip_size;		/* blocks used on each disk / stripe */
-	u64	disk_starting_blk;	/* First disk block used in volume */
-	u64	disk_blk_cnt;		/* disk blocks used by volume / disk */
-	u16	data_disks_per_row;	/* data disk entries / row in the map */
-	u16	metadata_disks_per_row;	/* mirror/parity disk entries / row
+	__le16	strip_size;		/* blocks used on each disk / stripe */
+	__le64	disk_starting_blk;	/* First disk block used in volume */
+	__le64	disk_blk_cnt;		/* disk blocks used by volume / disk */
+	__le16	data_disks_per_row;	/* data disk entries / row in the map */
+	__le16	metadata_disks_per_row;	/* mirror/parity disk entries / row
 					 * in the map */
-	u16	row_cnt;		/* rows in each layout map */
-	u16	layout_map_count;	/* layout maps (1 map per mirror/parity
+	__le16	row_cnt;		/* rows in each layout map */
+	__le16	layout_map_count;	/* layout maps (1 map per mirror/parity
 					 * group) */
-	u16	flags;			/* Bit 0 set if encryption enabled */
+	__le16	flags;			/* Bit 0 set if encryption enabled */
 #define RAID_MAP_FLAG_ENCRYPT_ON 0x01
-	u16	dekindex;		/* Data encryption key index. */
+	__le16	dekindex;		/* Data encryption key index. */
 	u8	reserved[16];
 	struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
 };
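
With raid_map_data annotated as above, every consumer in hpsa.c converts at the point of use, which is why the earlier hunks wrap each field access. For example, the blocks-per-row computation from hpsa_scsi_ioaccel_raid_map() reduces to this sketch (helper name invented; widths chosen to match the __le16 fields above):

#include <linux/types.h>
#include <asm/byteorder.h>

static u32 demo_blocks_per_row(const struct raid_map_data *map)
{
	return (u32)le16_to_cpu(map->data_disks_per_row) *
			le16_to_cpu(map->strip_size);
}
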
@@ -313,8 +313,8 @@ union LUNAddr {
 struct CommandListHeader {
 	u8              ReplyQueue;
 	u8              SGList;
-	u16             SGTotal;
-	u64		tag;
+	__le16          SGTotal;
+	__le64		tag;
 	union LUNAddr     LUN;
 };
 
@@ -338,14 +338,14 @@ struct RequestBlock {
 };
 
 struct ErrDescriptor {
-	u64 Addr;
-	u32 Len;
+	__le64 Addr;
+	__le32 Len;
 };
 
 struct SGDescriptor {
-	u64 Addr;
-	u32 Len;
-	u32 Ext;
+	__le64 Addr;
+	__le32 Len;
+	__le32 Ext;
 };
 
 union MoreErrInfo {
@@ -420,7 +420,7 @@ struct CommandList {
  */
 #define IOACCEL1_COMMANDLIST_ALIGNMENT 128
 struct io_accel1_cmd {
-	u16 dev_handle;			/* 0x00 - 0x01 */
+	__le16 dev_handle;		/* 0x00 - 0x01 */
 	u8  reserved1;			/* 0x02 */
 	u8  function;			/* 0x03 */
 	u8  reserved2[8];		/* 0x04 - 0x0B */
@@ -430,20 +430,20 @@ struct io_accel1_cmd {
 	u8  reserved4;			/* 0x13 */
 	u8  sgl_offset;			/* 0x14 */
 	u8  reserved5[7];		/* 0x15 - 0x1B */
-	u32 transfer_len;		/* 0x1C - 0x1F */
+	__le32 transfer_len;		/* 0x1C - 0x1F */
 	u8  reserved6[4];		/* 0x20 - 0x23 */
-	u16 io_flags;			/* 0x24 - 0x25 */
+	__le16 io_flags;		/* 0x24 - 0x25 */
 	u8  reserved7[14];		/* 0x26 - 0x33 */
 	u8  LUN[8];			/* 0x34 - 0x3B */
-	u32 control;			/* 0x3C - 0x3F */
+	__le32 control;			/* 0x3C - 0x3F */
 	u8  CDB[16];			/* 0x40 - 0x4F */
 	u8  reserved8[16];		/* 0x50 - 0x5F */
-	u16 host_context_flags;		/* 0x60 - 0x61 */
-	u16 timeout_sec;		/* 0x62 - 0x63 */
+	__le16 host_context_flags;	/* 0x60 - 0x61 */
+	__le16 timeout_sec;		/* 0x62 - 0x63 */
 	u8  ReplyQueue;			/* 0x64 */
 	u8  reserved9[3];		/* 0x65 - 0x67 */
-	u64 tag;			/* 0x68 - 0x6F */
-	u64 host_addr;			/* 0x70 - 0x77 */
+	__le64 tag;			/* 0x68 - 0x6F */
+	__le64 host_addr;		/* 0x70 - 0x77 */
 	u8  CISS_LUN[8];		/* 0x78 - 0x7F */
 	struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
 } __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
@@ -470,8 +470,8 @@ struct io_accel1_cmd {
 #define IOACCEL1_BUSADDR_CMDTYPE        0x00000060
 
 struct ioaccel2_sg_element {
-	u64 address;
-	u32 length;
+	__le64 address;
+	__le32 length;
 	u8 reserved[3];
 	u8 chain_indicator;
 #define IOACCEL2_CHAIN 0x80
@@ -526,20 +526,20 @@ struct io_accel2_cmd {
 				/* 0=off, 1=on */
 	u8 reply_queue;		/* Reply Queue ID */
 	u8 reserved1;		/* Reserved */
-	u32 scsi_nexus;		/* Device Handle */
-	u32 Tag;		/* cciss tag, lower 4 bytes only */
-	u32 tweak_lower;	/* Encryption tweak, lower 4 bytes */
+	__le32 scsi_nexus;	/* Device Handle */
+	__le32 Tag;		/* cciss tag, lower 4 bytes only */
+	__le32 tweak_lower;	/* Encryption tweak, lower 4 bytes */
 	u8 cdb[16];		/* SCSI Command Descriptor Block */
 	u8 cciss_lun[8];	/* 8 byte SCSI address */
-	u32 data_len;		/* Total bytes to transfer */
+	__le32 data_len;	/* Total bytes to transfer */
 	u8 cmd_priority_task_attr;	/* priority and task attrs */
 #define IOACCEL2_PRIORITY_MASK 0x78
 #define IOACCEL2_ATTR_MASK 0x07
 	u8 sg_count;		/* Number of sg elements */
-	u16 dekindex;		/* Data encryption key index */
-	u64 err_ptr;		/* Error Pointer */
-	u32 err_len;		/* Error Length*/
-	u32 tweak_upper;	/* Encryption tweak, upper 4 bytes */
+	__le16 dekindex;	/* Data encryption key index */
+	__le64 err_ptr;		/* Error Pointer */
+	__le32 err_len;		/* Error Length*/
+	__le32 tweak_upper;	/* Encryption tweak, upper 4 bytes */
 	struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
 	struct io_accel2_scsi_response error_data;
 } __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
@@ -563,18 +563,18 @@ struct hpsa_tmf_struct {
 	u8 reserved1;		/* byte 3 Reserved */
 	u32 it_nexus;		/* SCSI I-T Nexus */
 	u8 lun_id[8];		/* LUN ID for TMF request */
-	u64 tag;		/* cciss tag associated w/ request */
-	u64 abort_tag;		/* cciss tag of SCSI cmd or task to abort */
-	u64 error_ptr;		/* Error Pointer */
-	u32 error_len;		/* Error Length */
+	__le64 tag;		/* cciss tag associated w/ request */
+	__le64 abort_tag;	/* cciss tag of SCSI cmd or TMF to abort */
+	__le64 error_ptr;	/* Error Pointer */
+	__le32 error_len;	/* Error Length */
 };
 
 /* Configuration Table Structure */
 struct HostWrite {
-	u32 TransportRequest;
-	u32 command_pool_addr_hi;
-	u32 CoalIntDelay;
-	u32 CoalIntCount;
+	__le32 TransportRequest;
+	__le32 command_pool_addr_hi;
+	__le32 CoalIntDelay;
+	__le32 CoalIntCount;
 };
 
 #define SIMPLE_MODE     0x02
@@ -585,54 +585,54 @@ struct HostWrite {
 #define DRIVER_SUPPORT_UA_ENABLE        0x00000001
 
 struct CfgTable {
 	u8		Signature[4];
-	u32		SpecValence;
-	u32		TransportSupport;
-	u32		TransportActive;
+	__le32		SpecValence;
+	__le32		TransportSupport;
+	__le32		TransportActive;
 	struct HostWrite HostWrite;
-	u32		CmdsOutMax;
-	u32		BusTypes;
-	u32		TransMethodOffset;
+	__le32		CmdsOutMax;
+	__le32		BusTypes;
+	__le32		TransMethodOffset;
 	u8		ServerName[16];
-	u32		HeartBeat;
-	u32		driver_support;
+	__le32		HeartBeat;
+	__le32		driver_support;
 #define	ENABLE_SCSI_PREFETCH		0x100
 #define ENABLE_UNIT_ATTN		0x01
-	u32		MaxScatterGatherElements;
-	u32		MaxLogicalUnits;
-	u32		MaxPhysicalDevices;
-	u32		MaxPhysicalDrivesPerLogicalUnit;
-	u32		MaxPerformantModeCommands;
-	u32		MaxBlockFetch;
-	u32		PowerConservationSupport;
-	u32		PowerConservationEnable;
-	u32		TMFSupportFlags;
+	__le32		MaxScatterGatherElements;
+	__le32		MaxLogicalUnits;
+	__le32		MaxPhysicalDevices;
+	__le32		MaxPhysicalDrivesPerLogicalUnit;
+	__le32		MaxPerformantModeCommands;
+	__le32		MaxBlockFetch;
+	__le32		PowerConservationSupport;
+	__le32		PowerConservationEnable;
+	__le32		TMFSupportFlags;
 	u8		TMFTagMask[8];
 	u8		reserved[0x78 - 0x70];
-	u32		misc_fw_support; /* offset 0x78 */
-#define			MISC_FW_DOORBELL_RESET (0x02)
-#define			MISC_FW_DOORBELL_RESET2 (0x010)
-#define			MISC_FW_RAID_OFFLOAD_BASIC (0x020)
-#define			MISC_FW_EVENT_NOTIFY (0x080)
+	__le32		misc_fw_support; /* offset 0x78 */
+#define			MISC_FW_DOORBELL_RESET 0x02
+#define			MISC_FW_DOORBELL_RESET2 0x010
+#define			MISC_FW_RAID_OFFLOAD_BASIC 0x020
+#define			MISC_FW_EVENT_NOTIFY 0x080
 	u8		driver_version[32];
-	u32		max_cached_write_size;
+	__le32		max_cached_write_size;
 	u8		driver_scratchpad[16];
-	u32		max_error_info_length;
-	u32		io_accel_max_embedded_sg_count;
-	u32		io_accel_request_size_offset;
-	u32		event_notify;
+	__le32		max_error_info_length;
+	__le32		io_accel_max_embedded_sg_count;
+	__le32		io_accel_request_size_offset;
+	__le32		event_notify;
 #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
 #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
-	u32		clear_event_notify;
+	__le32		clear_event_notify;
 };
 
 #define NUM_BLOCKFETCH_ENTRIES 8
 struct TransTable_struct {
-	u32		BlockFetch[NUM_BLOCKFETCH_ENTRIES];
-	u32		RepQSize;
-	u32		RepQCount;
-	u32		RepQCtrAddrLow32;
-	u32		RepQCtrAddrHigh32;
+	__le32		BlockFetch[NUM_BLOCKFETCH_ENTRIES];
+	__le32		RepQSize;
+	__le32		RepQCount;
+	__le32		RepQCtrAddrLow32;
+	__le32		RepQCtrAddrHigh32;
 #define MAX_REPLY_QUEUES 64
 	struct vals32  RepQAddr[MAX_REPLY_QUEUES];
 };
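
A hedged note on verification: annotations like these pay off when the driver is run through sparse's endian checker, which in kernels of this era had to be requested explicitly, e.g. make C=2 CF="-D__CHECK_ENDIAN__" drivers/scsi/hpsa.o. A clean run means every __le* field is converted exactly once in each direction, which is presumably how a patch of this kind is validated.
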