author     Wayne Boyer <wayneb@linux.vnet.ibm.com>     2010-02-19 16:23:36 -0500
committer  James Bottomley <James.Bottomley@suse.de>   2010-03-03 05:31:33 -0500
commit     a32c055feed74246747bf4f45adb765136d3a4d3 (patch)
tree       41b268fa7ce654c8ea46b2ba6e704332449022a4 /drivers/scsi
parent     6c71dcb28ff9b63b814a0b76a256f5dae08d3e0d (diff)
[SCSI] ipr: add support for new adapter command structures for the next generation chip
Change the adapter command structures so that both 32-bit and 64-bit based
adapters can work with the driver.
Signed-off-by: Wayne Boyer <wayneb@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
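
At a glance, the patch keeps a single command-block layout for both adapter generations: every field whose width differs between the interface levels is wrapped in a union, and a new ioa_cfg->sis64 flag selects which member the driver fills in and which register-write width ipr_send_command() uses. The sketch below is a compilable user-space model of that shape, not the kernel code itself; the array length, the skipped endianness conversions, and the elided IOARCB fields are simplifications, and the real definitions are in the drivers/scsi/ipr.h hunks further down.

/* Illustrative model of the SIS32/SIS64 command-block split. */
#include <stdint.h>
#include <stdio.h>

#define NUM_IOADL_ENTRIES 64	/* stand-in for IPR_NUM_IOADL_ENTRIES */

struct ioadl_desc   { uint32_t flags_and_data_len; uint32_t address; };       /* SIS32 */
struct ioadl64_desc { uint32_t flags; uint32_t data_len; uint64_t address; }; /* SIS64 */

struct ioarcb {
	union {					/* host PCI address of this IOARCB */
		uint32_t ioarcb_host_pci_addr;	 /* SIS32 */
		uint64_t ioarcb_host_pci_addr64; /* SIS64 */
	} a;
	/* ... unified data_transfer_length / ioadl_len fields follow ... */
};

struct cmnd {
	struct ioarcb ioarcb;
	union {					/* one scatter/gather area, two layouts */
		struct ioadl_desc   ioadl[NUM_IOADL_ENTRIES];
		struct ioadl64_desc ioadl64[NUM_IOADL_ENTRIES];
	} i;
};

int main(void)
{
	/* One command-block layout serves both adapter generations; a
	 * runtime flag (ioa_cfg->sis64 in the driver) picks the members. */
	printf("sizeof(struct cmnd) = %zu\n", sizeof(struct cmnd));
	return 0;
}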
Diffstat (limited to 'drivers/scsi')
-rw-r--r--   drivers/scsi/ipr.c   450
-rw-r--r--   drivers/scsi/ipr.h    61
2 files changed, 385 insertions(+), 126 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 032f0d0e6cb4..359882eadc26 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -131,13 +131,13 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { | |||
131 | }; | 131 | }; |
132 | 132 | ||
133 | static const struct ipr_chip_t ipr_chip[] = { | 133 | static const struct ipr_chip_t ipr_chip[] = { |
134 | { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] }, | 134 | { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, |
135 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] }, | 135 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, |
136 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, | 136 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, |
137 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, | 137 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, |
138 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] }, | 138 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] }, |
139 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] }, | 139 | { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }, |
140 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] } | 140 | { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] } |
141 | }; | 141 | }; |
142 | 142 | ||
143 | static int ipr_max_bus_speeds [] = { | 143 | static int ipr_max_bus_speeds [] = { |
@@ -468,7 +468,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, | |||
468 | trace_entry->time = jiffies; | 468 | trace_entry->time = jiffies; |
469 | trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; | 469 | trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; |
470 | trace_entry->type = type; | 470 | trace_entry->type = type; |
471 | trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command; | 471 | if (ipr_cmd->ioa_cfg->sis64) |
472 | trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command; | ||
473 | else | ||
474 | trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command; | ||
472 | trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; | 475 | trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; |
473 | trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; | 476 | trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; |
474 | trace_entry->u.add_data = add_data; | 477 | trace_entry->u.add_data = add_data; |
@@ -488,16 +491,23 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) | |||
488 | { | 491 | { |
489 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 492 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
490 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 493 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; |
491 | dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); | 494 | dma_addr_t dma_addr = ipr_cmd->dma_addr; |
492 | 495 | ||
493 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); | 496 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); |
494 | ioarcb->write_data_transfer_length = 0; | 497 | ioarcb->data_transfer_length = 0; |
495 | ioarcb->read_data_transfer_length = 0; | 498 | ioarcb->read_data_transfer_length = 0; |
496 | ioarcb->write_ioadl_len = 0; | 499 | ioarcb->ioadl_len = 0; |
497 | ioarcb->read_ioadl_len = 0; | 500 | ioarcb->read_ioadl_len = 0; |
498 | ioarcb->write_ioadl_addr = | 501 | |
499 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); | 502 | if (ipr_cmd->ioa_cfg->sis64) |
500 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | 503 | ioarcb->u.sis64_addr_data.data_ioadl_addr = |
504 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); | ||
505 | else { | ||
506 | ioarcb->write_ioadl_addr = | ||
507 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); | ||
508 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | ||
509 | } | ||
510 | |||
501 | ioasa->ioasc = 0; | 511 | ioasa->ioasc = 0; |
502 | ioasa->residual_data_len = 0; | 512 | ioasa->residual_data_len = 0; |
503 | ioasa->u.gata.status = 0; | 513 | ioasa->u.gata.status = 0; |
@@ -693,6 +703,35 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) | |||
693 | } | 703 | } |
694 | 704 | ||
695 | /** | 705 | /** |
706 | * ipr_send_command - Send driver initiated requests. | ||
707 | * @ipr_cmd: ipr command struct | ||
708 | * | ||
709 | * This function sends a command to the adapter using the correct write call. | ||
710 | * In the case of sis64, calculate the ioarcb size required. Then or in the | ||
711 | * appropriate bits. | ||
712 | * | ||
713 | * Return value: | ||
714 | * none | ||
715 | **/ | ||
716 | static void ipr_send_command(struct ipr_cmnd *ipr_cmd) | ||
717 | { | ||
718 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | ||
719 | dma_addr_t send_dma_addr = ipr_cmd->dma_addr; | ||
720 | |||
721 | if (ioa_cfg->sis64) { | ||
722 | /* The default size is 256 bytes */ | ||
723 | send_dma_addr |= 0x1; | ||
724 | |||
725 | /* If the number of ioadls * size of ioadl > 128 bytes, | ||
726 | then use a 512 byte ioarcb */ | ||
727 | if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 ) | ||
728 | send_dma_addr |= 0x4; | ||
729 | writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); | ||
730 | } else | ||
731 | writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); | ||
732 | } | ||
733 | |||
734 | /** | ||
696 | * ipr_do_req - Send driver initiated requests. | 735 | * ipr_do_req - Send driver initiated requests. |
697 | * @ipr_cmd: ipr command struct | 736 | * @ipr_cmd: ipr command struct |
698 | * @done: done function | 737 | * @done: done function |
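
For reference, the bits OR'd into the IOARRIN value in ipr_send_command() above tell a SIS64 adapter how large an IOARCB to fetch: bit 0 requests the default 256-byte block and bit 2 switches to a 512-byte block once the scatter/gather list spills past 128 bytes, per the comments in the hunk. A minimal, runnable sketch of that encoding, assuming the 16-byte ipr_ioadl64_desc size defined in the ipr.h hunk below:

#include <stdint.h>
#include <stdio.h>

#define IOADL64_DESC_SIZE 16	/* sizeof(struct ipr_ioadl64_desc) per the ipr.h changes */

/* Value written to the IOARRIN register for a SIS64 adapter (model only). */
static uint64_t sis64_ioarrin_value(uint64_t ioarcb_dma, unsigned int nr_ioadl)
{
	uint64_t val = ioarcb_dma | 0x1;		/* 256-byte IOARCB by default   */

	if (nr_ioadl * IOADL64_DESC_SIZE > 128)		/* s/g list spills past 128 bytes */
		val |= 0x4;				/* ask for a 512-byte IOARCB     */
	return val;
}

int main(void)
{
	printf("small s/g: 0x%llx\n", (unsigned long long)sis64_ioarrin_value(0x10000, 4));
	printf("large s/g: 0x%llx\n", (unsigned long long)sis64_ioarrin_value(0x10000, 12));
	return 0;
}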
@@ -724,8 +763,8 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd, | |||
724 | ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); | 763 | ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); |
725 | 764 | ||
726 | mb(); | 765 | mb(); |
727 | writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), | 766 | |
728 | ioa_cfg->regs.ioarrin_reg); | 767 | ipr_send_command(ipr_cmd); |
729 | } | 768 | } |
730 | 769 | ||
731 | /** | 770 | /** |
@@ -747,6 +786,51 @@ static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd) | |||
747 | } | 786 | } |
748 | 787 | ||
749 | /** | 788 | /** |
789 | * ipr_init_ioadl - initialize the ioadl for the correct SIS type | ||
790 | * @ipr_cmd: ipr command struct | ||
791 | * @dma_addr: dma address | ||
792 | * @len: transfer length | ||
793 | * @flags: ioadl flag value | ||
794 | * | ||
795 | * This function initializes an ioadl in the case where there is only a single | ||
796 | * descriptor. | ||
797 | * | ||
798 | * Return value: | ||
799 | * nothing | ||
800 | **/ | ||
801 | static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr, | ||
802 | u32 len, int flags) | ||
803 | { | ||
804 | struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; | ||
805 | struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; | ||
806 | |||
807 | ipr_cmd->dma_use_sg = 1; | ||
808 | |||
809 | if (ipr_cmd->ioa_cfg->sis64) { | ||
810 | ioadl64->flags = cpu_to_be32(flags); | ||
811 | ioadl64->data_len = cpu_to_be32(len); | ||
812 | ioadl64->address = cpu_to_be64(dma_addr); | ||
813 | |||
814 | ipr_cmd->ioarcb.ioadl_len = | ||
815 | cpu_to_be32(sizeof(struct ipr_ioadl64_desc)); | ||
816 | ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); | ||
817 | } else { | ||
818 | ioadl->flags_and_data_len = cpu_to_be32(flags | len); | ||
819 | ioadl->address = cpu_to_be32(dma_addr); | ||
820 | |||
821 | if (flags == IPR_IOADL_FLAGS_READ_LAST) { | ||
822 | ipr_cmd->ioarcb.read_ioadl_len = | ||
823 | cpu_to_be32(sizeof(struct ipr_ioadl_desc)); | ||
824 | ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len); | ||
825 | } else { | ||
826 | ipr_cmd->ioarcb.ioadl_len = | ||
827 | cpu_to_be32(sizeof(struct ipr_ioadl_desc)); | ||
828 | ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); | ||
829 | } | ||
830 | } | ||
831 | } | ||
832 | |||
833 | /** | ||
750 | * ipr_send_blocking_cmd - Send command and sleep on its completion. | 834 | * ipr_send_blocking_cmd - Send command and sleep on its completion. |
751 | * @ipr_cmd: ipr command struct | 835 | * @ipr_cmd: ipr command struct |
752 | * @timeout_func: function to invoke if command times out | 836 | * @timeout_func: function to invoke if command times out |
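
Several later hunks (the HCAM send, request sense, mode select/sense, inquiry, and query-config paths) replace their open-coded single-descriptor setup with one call to this new helper. The sketch below models only the SIS32 read branch in user space to show the shape of the call; the flag value, the trimmed struct layout, and the skipped cpu_to_be32() conversions are illustrative stand-ins, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define IOADL_FLAGS_READ_LAST 0x4b000000u	/* illustrative value only */

struct ioadl_desc { uint32_t flags_and_data_len; uint32_t address; };

struct cmnd {
	struct ioadl_desc ioadl[1];
	uint32_t read_data_transfer_length;
	uint32_t read_ioadl_len;
	unsigned short dma_use_sg;
};

/* Single-descriptor setup, SIS32 read case only (cf. ipr_init_ioadl above). */
static void init_ioadl(struct cmnd *cmd, uint32_t dma_addr, uint32_t len, uint32_t flags)
{
	cmd->dma_use_sg = 1;
	cmd->ioadl[0].flags_and_data_len = flags | len;
	cmd->ioadl[0].address = dma_addr;
	cmd->read_ioadl_len = sizeof(struct ioadl_desc);
	cmd->read_data_transfer_length = len;
}

int main(void)
{
	struct cmnd cmd = { 0 };

	/* e.g. an HCAM-style buffer: one DMA-able buffer read from the adapter */
	init_ioadl(&cmd, 0xdead0000u, 4096, IOADL_FLAGS_READ_LAST);
	printf("len %u, ioadl len %u\n",
	       (unsigned)cmd.read_data_transfer_length, (unsigned)cmd.read_ioadl_len);
	return 0;
}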
@@ -803,11 +887,8 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, | |||
803 | ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; | 887 | ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; |
804 | ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; | 888 | ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; |
805 | 889 | ||
806 | ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam)); | 890 | ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma, |
807 | ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); | 891 | sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST); |
808 | ipr_cmd->ioadl[0].flags_and_data_len = | ||
809 | cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam)); | ||
810 | ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma); | ||
811 | 892 | ||
812 | if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE) | 893 | if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE) |
813 | ipr_cmd->done = ipr_process_ccn; | 894 | ipr_cmd->done = ipr_process_ccn; |
@@ -817,8 +898,8 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, | |||
817 | ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); | 898 | ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); |
818 | 899 | ||
819 | mb(); | 900 | mb(); |
820 | writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), | 901 | |
821 | ioa_cfg->regs.ioarrin_reg); | 902 | ipr_send_command(ipr_cmd); |
822 | } else { | 903 | } else { |
823 | list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); | 904 | list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); |
824 | } | 905 | } |
@@ -2976,6 +3057,37 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, | |||
2976 | } | 3057 | } |
2977 | 3058 | ||
2978 | /** | 3059 | /** |
3060 | * ipr_build_ucode_ioadl64 - Build a microcode download IOADL | ||
3061 | * @ipr_cmd: ipr command struct | ||
3062 | * @sglist: scatter/gather list | ||
3063 | * | ||
3064 | * Builds a microcode download IOA data list (IOADL). | ||
3065 | * | ||
3066 | **/ | ||
3067 | static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd, | ||
3068 | struct ipr_sglist *sglist) | ||
3069 | { | ||
3070 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | ||
3071 | struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; | ||
3072 | struct scatterlist *scatterlist = sglist->scatterlist; | ||
3073 | int i; | ||
3074 | |||
3075 | ipr_cmd->dma_use_sg = sglist->num_dma_sg; | ||
3076 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; | ||
3077 | ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); | ||
3078 | |||
3079 | ioarcb->ioadl_len = | ||
3080 | cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); | ||
3081 | for (i = 0; i < ipr_cmd->dma_use_sg; i++) { | ||
3082 | ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE); | ||
3083 | ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i])); | ||
3084 | ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i])); | ||
3085 | } | ||
3086 | |||
3087 | ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); | ||
3088 | } | ||
3089 | |||
3090 | /** | ||
2979 | * ipr_build_ucode_ioadl - Build a microcode download IOADL | 3091 | * ipr_build_ucode_ioadl - Build a microcode download IOADL |
2980 | * @ipr_cmd: ipr command struct | 3092 | * @ipr_cmd: ipr command struct |
2981 | * @sglist: scatter/gather list | 3093 | * @sglist: scatter/gather list |
@@ -2987,14 +3099,15 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd, | |||
2987 | struct ipr_sglist *sglist) | 3099 | struct ipr_sglist *sglist) |
2988 | { | 3100 | { |
2989 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 3101 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
2990 | struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; | 3102 | struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; |
2991 | struct scatterlist *scatterlist = sglist->scatterlist; | 3103 | struct scatterlist *scatterlist = sglist->scatterlist; |
2992 | int i; | 3104 | int i; |
2993 | 3105 | ||
2994 | ipr_cmd->dma_use_sg = sglist->num_dma_sg; | 3106 | ipr_cmd->dma_use_sg = sglist->num_dma_sg; |
2995 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; | 3107 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; |
2996 | ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len); | 3108 | ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); |
2997 | ioarcb->write_ioadl_len = | 3109 | |
3110 | ioarcb->ioadl_len = | ||
2998 | cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); | 3111 | cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); |
2999 | 3112 | ||
3000 | for (i = 0; i < ipr_cmd->dma_use_sg; i++) { | 3113 | for (i = 0; i < ipr_cmd->dma_use_sg; i++) { |
@@ -3828,14 +3941,19 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, | |||
3828 | ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); | 3941 | ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); |
3829 | ioarcb = &ipr_cmd->ioarcb; | 3942 | ioarcb = &ipr_cmd->ioarcb; |
3830 | cmd_pkt = &ioarcb->cmd_pkt; | 3943 | cmd_pkt = &ioarcb->cmd_pkt; |
3831 | regs = &ioarcb->add_data.u.regs; | 3944 | |
3945 | if (ipr_cmd->ioa_cfg->sis64) { | ||
3946 | regs = &ipr_cmd->i.ata_ioadl.regs; | ||
3947 | ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); | ||
3948 | } else | ||
3949 | regs = &ioarcb->u.add_data.u.regs; | ||
3832 | 3950 | ||
3833 | ioarcb->res_handle = res->cfgte.res_handle; | 3951 | ioarcb->res_handle = res->cfgte.res_handle; |
3834 | cmd_pkt->request_type = IPR_RQTYPE_IOACMD; | 3952 | cmd_pkt->request_type = IPR_RQTYPE_IOACMD; |
3835 | cmd_pkt->cdb[0] = IPR_RESET_DEVICE; | 3953 | cmd_pkt->cdb[0] = IPR_RESET_DEVICE; |
3836 | if (ipr_is_gata(res)) { | 3954 | if (ipr_is_gata(res)) { |
3837 | cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; | 3955 | cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; |
3838 | ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags)); | 3956 | ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags)); |
3839 | regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; | 3957 | regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; |
3840 | } | 3958 | } |
3841 | 3959 | ||
@@ -4309,6 +4427,53 @@ static irqreturn_t ipr_isr(int irq, void *devp) | |||
4309 | } | 4427 | } |
4310 | 4428 | ||
4311 | /** | 4429 | /** |
4430 | * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer | ||
4431 | * @ioa_cfg: ioa config struct | ||
4432 | * @ipr_cmd: ipr command struct | ||
4433 | * | ||
4434 | * Return value: | ||
4435 | * 0 on success / -1 on failure | ||
4436 | **/ | ||
4437 | static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, | ||
4438 | struct ipr_cmnd *ipr_cmd) | ||
4439 | { | ||
4440 | int i, nseg; | ||
4441 | struct scatterlist *sg; | ||
4442 | u32 length; | ||
4443 | u32 ioadl_flags = 0; | ||
4444 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; | ||
4445 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | ||
4446 | struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; | ||
4447 | |||
4448 | length = scsi_bufflen(scsi_cmd); | ||
4449 | if (!length) | ||
4450 | return 0; | ||
4451 | |||
4452 | nseg = scsi_dma_map(scsi_cmd); | ||
4453 | if (nseg < 0) { | ||
4454 | dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); | ||
4455 | return -1; | ||
4456 | } | ||
4457 | |||
4458 | ipr_cmd->dma_use_sg = nseg; | ||
4459 | |||
4460 | if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { | ||
4461 | ioadl_flags = IPR_IOADL_FLAGS_WRITE; | ||
4462 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; | ||
4463 | } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) | ||
4464 | ioadl_flags = IPR_IOADL_FLAGS_READ; | ||
4465 | |||
4466 | scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { | ||
4467 | ioadl64[i].flags = cpu_to_be32(ioadl_flags); | ||
4468 | ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); | ||
4469 | ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); | ||
4470 | } | ||
4471 | |||
4472 | ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); | ||
4473 | return 0; | ||
4474 | } | ||
4475 | |||
4476 | /** | ||
4312 | * ipr_build_ioadl - Build a scatter/gather list and map the buffer | 4477 | * ipr_build_ioadl - Build a scatter/gather list and map the buffer |
4313 | * @ioa_cfg: ioa config struct | 4478 | * @ioa_cfg: ioa config struct |
4314 | * @ipr_cmd: ipr command struct | 4479 | * @ipr_cmd: ipr command struct |
@@ -4325,7 +4490,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, | |||
4325 | u32 ioadl_flags = 0; | 4490 | u32 ioadl_flags = 0; |
4326 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; | 4491 | struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; |
4327 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 4492 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
4328 | struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; | 4493 | struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; |
4329 | 4494 | ||
4330 | length = scsi_bufflen(scsi_cmd); | 4495 | length = scsi_bufflen(scsi_cmd); |
4331 | if (!length) | 4496 | if (!length) |
@@ -4342,8 +4507,8 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, | |||
4342 | if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { | 4507 | if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { |
4343 | ioadl_flags = IPR_IOADL_FLAGS_WRITE; | 4508 | ioadl_flags = IPR_IOADL_FLAGS_WRITE; |
4344 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; | 4509 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; |
4345 | ioarcb->write_data_transfer_length = cpu_to_be32(length); | 4510 | ioarcb->data_transfer_length = cpu_to_be32(length); |
4346 | ioarcb->write_ioadl_len = | 4511 | ioarcb->ioadl_len = |
4347 | cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); | 4512 | cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); |
4348 | } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { | 4513 | } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { |
4349 | ioadl_flags = IPR_IOADL_FLAGS_READ; | 4514 | ioadl_flags = IPR_IOADL_FLAGS_READ; |
@@ -4352,11 +4517,10 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, | |||
4352 | cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); | 4517 | cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); |
4353 | } | 4518 | } |
4354 | 4519 | ||
4355 | if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) { | 4520 | if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { |
4356 | ioadl = ioarcb->add_data.u.ioadl; | 4521 | ioadl = ioarcb->u.add_data.u.ioadl; |
4357 | ioarcb->write_ioadl_addr = | 4522 | ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + |
4358 | cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) + | 4523 | offsetof(struct ipr_ioarcb, u.add_data)); |
4359 | offsetof(struct ipr_ioarcb, add_data)); | ||
4360 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | 4524 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; |
4361 | } | 4525 | } |
4362 | 4526 | ||
@@ -4446,18 +4610,24 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) | |||
4446 | { | 4610 | { |
4447 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 4611 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
4448 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; | 4612 | struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; |
4449 | dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); | 4613 | dma_addr_t dma_addr = ipr_cmd->dma_addr; |
4450 | 4614 | ||
4451 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); | 4615 | memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); |
4452 | ioarcb->write_data_transfer_length = 0; | 4616 | ioarcb->data_transfer_length = 0; |
4453 | ioarcb->read_data_transfer_length = 0; | 4617 | ioarcb->read_data_transfer_length = 0; |
4454 | ioarcb->write_ioadl_len = 0; | 4618 | ioarcb->ioadl_len = 0; |
4455 | ioarcb->read_ioadl_len = 0; | 4619 | ioarcb->read_ioadl_len = 0; |
4456 | ioasa->ioasc = 0; | 4620 | ioasa->ioasc = 0; |
4457 | ioasa->residual_data_len = 0; | 4621 | ioasa->residual_data_len = 0; |
4458 | ioarcb->write_ioadl_addr = | 4622 | |
4459 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); | 4623 | if (ipr_cmd->ioa_cfg->sis64) |
4460 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | 4624 | ioarcb->u.sis64_addr_data.data_ioadl_addr = |
4625 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); | ||
4626 | else { | ||
4627 | ioarcb->write_ioadl_addr = | ||
4628 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); | ||
4629 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | ||
4630 | } | ||
4461 | } | 4631 | } |
4462 | 4632 | ||
4463 | /** | 4633 | /** |
@@ -4489,15 +4659,8 @@ static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) | |||
4489 | cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; | 4659 | cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; |
4490 | cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); | 4660 | cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); |
4491 | 4661 | ||
4492 | ipr_cmd->ioadl[0].flags_and_data_len = | 4662 | ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, |
4493 | cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE); | 4663 | SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST); |
4494 | ipr_cmd->ioadl[0].address = | ||
4495 | cpu_to_be32(ipr_cmd->sense_buffer_dma); | ||
4496 | |||
4497 | ipr_cmd->ioarcb.read_ioadl_len = | ||
4498 | cpu_to_be32(sizeof(struct ipr_ioadl_desc)); | ||
4499 | ipr_cmd->ioarcb.read_data_transfer_length = | ||
4500 | cpu_to_be32(SCSI_SENSE_BUFFERSIZE); | ||
4501 | 4664 | ||
4502 | ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, | 4665 | ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, |
4503 | IPR_REQUEST_SENSE_TIMEOUT * 2); | 4666 | IPR_REQUEST_SENSE_TIMEOUT * 2); |
@@ -4916,13 +5079,16 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, | |||
4916 | (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) | 5079 | (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) |
4917 | ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; | 5080 | ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; |
4918 | 5081 | ||
4919 | if (likely(rc == 0)) | 5082 | if (likely(rc == 0)) { |
4920 | rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); | 5083 | if (ioa_cfg->sis64) |
5084 | rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); | ||
5085 | else | ||
5086 | rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); | ||
5087 | } | ||
4921 | 5088 | ||
4922 | if (likely(rc == 0)) { | 5089 | if (likely(rc == 0)) { |
4923 | mb(); | 5090 | mb(); |
4924 | writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), | 5091 | ipr_send_command(ipr_cmd); |
4925 | ioa_cfg->regs.ioarrin_reg); | ||
4926 | } else { | 5092 | } else { |
4927 | list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); | 5093 | list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); |
4928 | return SCSI_MLQUEUE_HOST_BUSY; | 5094 | return SCSI_MLQUEUE_HOST_BUSY; |
@@ -5146,6 +5312,52 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) | |||
5146 | } | 5312 | } |
5147 | 5313 | ||
5148 | /** | 5314 | /** |
5315 | * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list | ||
5316 | * @ipr_cmd: ipr command struct | ||
5317 | * @qc: ATA queued command | ||
5318 | * | ||
5319 | **/ | ||
5320 | static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, | ||
5321 | struct ata_queued_cmd *qc) | ||
5322 | { | ||
5323 | u32 ioadl_flags = 0; | ||
5324 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | ||
5325 | struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; | ||
5326 | struct ipr_ioadl64_desc *last_ioadl64 = NULL; | ||
5327 | int len = qc->nbytes; | ||
5328 | struct scatterlist *sg; | ||
5329 | unsigned int si; | ||
5330 | dma_addr_t dma_addr = ipr_cmd->dma_addr; | ||
5331 | |||
5332 | if (len == 0) | ||
5333 | return; | ||
5334 | |||
5335 | if (qc->dma_dir == DMA_TO_DEVICE) { | ||
5336 | ioadl_flags = IPR_IOADL_FLAGS_WRITE; | ||
5337 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; | ||
5338 | } else if (qc->dma_dir == DMA_FROM_DEVICE) | ||
5339 | ioadl_flags = IPR_IOADL_FLAGS_READ; | ||
5340 | |||
5341 | ioarcb->data_transfer_length = cpu_to_be32(len); | ||
5342 | ioarcb->ioadl_len = | ||
5343 | cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); | ||
5344 | ioarcb->u.sis64_addr_data.data_ioadl_addr = | ||
5345 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl)); | ||
5346 | |||
5347 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | ||
5348 | ioadl64->flags = cpu_to_be32(ioadl_flags); | ||
5349 | ioadl64->data_len = cpu_to_be32(sg_dma_len(sg)); | ||
5350 | ioadl64->address = cpu_to_be64(sg_dma_address(sg)); | ||
5351 | |||
5352 | last_ioadl64 = ioadl64; | ||
5353 | ioadl64++; | ||
5354 | } | ||
5355 | |||
5356 | if (likely(last_ioadl64)) | ||
5357 | last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); | ||
5358 | } | ||
5359 | |||
5360 | /** | ||
5149 | * ipr_build_ata_ioadl - Build an ATA scatter/gather list | 5361 | * ipr_build_ata_ioadl - Build an ATA scatter/gather list |
5150 | * @ipr_cmd: ipr command struct | 5362 | * @ipr_cmd: ipr command struct |
5151 | * @qc: ATA queued command | 5363 | * @qc: ATA queued command |
@@ -5156,7 +5368,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, | |||
5156 | { | 5368 | { |
5157 | u32 ioadl_flags = 0; | 5369 | u32 ioadl_flags = 0; |
5158 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 5370 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
5159 | struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; | 5371 | struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; |
5160 | struct ipr_ioadl_desc *last_ioadl = NULL; | 5372 | struct ipr_ioadl_desc *last_ioadl = NULL; |
5161 | int len = qc->nbytes; | 5373 | int len = qc->nbytes; |
5162 | struct scatterlist *sg; | 5374 | struct scatterlist *sg; |
@@ -5168,8 +5380,8 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, | |||
5168 | if (qc->dma_dir == DMA_TO_DEVICE) { | 5380 | if (qc->dma_dir == DMA_TO_DEVICE) { |
5169 | ioadl_flags = IPR_IOADL_FLAGS_WRITE; | 5381 | ioadl_flags = IPR_IOADL_FLAGS_WRITE; |
5170 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; | 5382 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; |
5171 | ioarcb->write_data_transfer_length = cpu_to_be32(len); | 5383 | ioarcb->data_transfer_length = cpu_to_be32(len); |
5172 | ioarcb->write_ioadl_len = | 5384 | ioarcb->ioadl_len = |
5173 | cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); | 5385 | cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); |
5174 | } else if (qc->dma_dir == DMA_FROM_DEVICE) { | 5386 | } else if (qc->dma_dir == DMA_FROM_DEVICE) { |
5175 | ioadl_flags = IPR_IOADL_FLAGS_READ; | 5387 | ioadl_flags = IPR_IOADL_FLAGS_READ; |
@@ -5212,10 +5424,15 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) | |||
5212 | 5424 | ||
5213 | ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); | 5425 | ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); |
5214 | ioarcb = &ipr_cmd->ioarcb; | 5426 | ioarcb = &ipr_cmd->ioarcb; |
5215 | regs = &ioarcb->add_data.u.regs; | ||
5216 | 5427 | ||
5217 | memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data)); | 5428 | if (ioa_cfg->sis64) { |
5218 | ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs)); | 5429 | regs = &ipr_cmd->i.ata_ioadl.regs; |
5430 | ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); | ||
5431 | } else | ||
5432 | regs = &ioarcb->u.add_data.u.regs; | ||
5433 | |||
5434 | memset(regs, 0, sizeof(*regs)); | ||
5435 | ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); | ||
5219 | 5436 | ||
5220 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); | 5437 | list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); |
5221 | ipr_cmd->qc = qc; | 5438 | ipr_cmd->qc = qc; |
@@ -5226,7 +5443,11 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) | |||
5226 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; | 5443 | ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; |
5227 | ipr_cmd->dma_use_sg = qc->n_elem; | 5444 | ipr_cmd->dma_use_sg = qc->n_elem; |
5228 | 5445 | ||
5229 | ipr_build_ata_ioadl(ipr_cmd, qc); | 5446 | if (ioa_cfg->sis64) |
5447 | ipr_build_ata_ioadl64(ipr_cmd, qc); | ||
5448 | else | ||
5449 | ipr_build_ata_ioadl(ipr_cmd, qc); | ||
5450 | |||
5230 | regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; | 5451 | regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; |
5231 | ipr_copy_sata_tf(regs, &qc->tf); | 5452 | ipr_copy_sata_tf(regs, &qc->tf); |
5232 | memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); | 5453 | memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); |
@@ -5257,8 +5478,9 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) | |||
5257 | } | 5478 | } |
5258 | 5479 | ||
5259 | mb(); | 5480 | mb(); |
5260 | writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr), | 5481 | |
5261 | ioa_cfg->regs.ioarrin_reg); | 5482 | ipr_send_command(ipr_cmd); |
5483 | |||
5262 | return 0; | 5484 | return 0; |
5263 | } | 5485 | } |
5264 | 5486 | ||
@@ -5459,7 +5681,7 @@ static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev, | |||
5459 | * ipr_set_supported_devs - Send Set Supported Devices for a device | 5681 | * ipr_set_supported_devs - Send Set Supported Devices for a device |
5460 | * @ipr_cmd: ipr command struct | 5682 | * @ipr_cmd: ipr command struct |
5461 | * | 5683 | * |
5462 | * This function send a Set Supported Devices to the adapter | 5684 | * This function sends a Set Supported Devices to the adapter |
5463 | * | 5685 | * |
5464 | * Return value: | 5686 | * Return value: |
5465 | * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN | 5687 | * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN |
@@ -5468,7 +5690,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) | |||
5468 | { | 5690 | { |
5469 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 5691 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
5470 | struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; | 5692 | struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; |
5471 | struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; | ||
5472 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 5693 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
5473 | struct ipr_resource_entry *res = ipr_cmd->u.res; | 5694 | struct ipr_resource_entry *res = ipr_cmd->u.res; |
5474 | 5695 | ||
@@ -5489,13 +5710,11 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) | |||
5489 | ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; | 5710 | ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; |
5490 | ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; | 5711 | ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; |
5491 | 5712 | ||
5492 | ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | | 5713 | ipr_init_ioadl(ipr_cmd, |
5493 | sizeof(struct ipr_supported_device)); | 5714 | ioa_cfg->vpd_cbs_dma + |
5494 | ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma + | 5715 | offsetof(struct ipr_misc_cbs, supp_dev), |
5495 | offsetof(struct ipr_misc_cbs, supp_dev)); | 5716 | sizeof(struct ipr_supported_device), |
5496 | ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); | 5717 | IPR_IOADL_FLAGS_WRITE_LAST); |
5497 | ioarcb->write_data_transfer_length = | ||
5498 | cpu_to_be32(sizeof(struct ipr_supported_device)); | ||
5499 | 5718 | ||
5500 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, | 5719 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, |
5501 | IPR_SET_SUP_DEVICE_TIMEOUT); | 5720 | IPR_SET_SUP_DEVICE_TIMEOUT); |
@@ -5695,10 +5914,9 @@ static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, | |||
5695 | * none | 5914 | * none |
5696 | **/ | 5915 | **/ |
5697 | static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, | 5916 | static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, |
5698 | __be32 res_handle, u8 parm, u32 dma_addr, | 5917 | __be32 res_handle, u8 parm, |
5699 | u8 xfer_len) | 5918 | dma_addr_t dma_addr, u8 xfer_len) |
5700 | { | 5919 | { |
5701 | struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; | ||
5702 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 5920 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
5703 | 5921 | ||
5704 | ioarcb->res_handle = res_handle; | 5922 | ioarcb->res_handle = res_handle; |
@@ -5708,11 +5926,7 @@ static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, | |||
5708 | ioarcb->cmd_pkt.cdb[1] = parm; | 5926 | ioarcb->cmd_pkt.cdb[1] = parm; |
5709 | ioarcb->cmd_pkt.cdb[4] = xfer_len; | 5927 | ioarcb->cmd_pkt.cdb[4] = xfer_len; |
5710 | 5928 | ||
5711 | ioadl->flags_and_data_len = | 5929 | ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST); |
5712 | cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len); | ||
5713 | ioadl->address = cpu_to_be32(dma_addr); | ||
5714 | ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); | ||
5715 | ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len); | ||
5716 | } | 5930 | } |
5717 | 5931 | ||
5718 | /** | 5932 | /** |
@@ -5762,9 +5976,8 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) | |||
5762 | **/ | 5976 | **/ |
5763 | static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, | 5977 | static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, |
5764 | __be32 res_handle, | 5978 | __be32 res_handle, |
5765 | u8 parm, u32 dma_addr, u8 xfer_len) | 5979 | u8 parm, dma_addr_t dma_addr, u8 xfer_len) |
5766 | { | 5980 | { |
5767 | struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; | ||
5768 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 5981 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
5769 | 5982 | ||
5770 | ioarcb->res_handle = res_handle; | 5983 | ioarcb->res_handle = res_handle; |
@@ -5773,11 +5986,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, | |||
5773 | ioarcb->cmd_pkt.cdb[4] = xfer_len; | 5986 | ioarcb->cmd_pkt.cdb[4] = xfer_len; |
5774 | ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; | 5987 | ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; |
5775 | 5988 | ||
5776 | ioadl->flags_and_data_len = | 5989 | ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); |
5777 | cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len); | ||
5778 | ioadl->address = cpu_to_be32(dma_addr); | ||
5779 | ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); | ||
5780 | ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); | ||
5781 | } | 5990 | } |
5782 | 5991 | ||
5783 | /** | 5992 | /** |
@@ -6033,7 +6242,6 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) | |||
6033 | { | 6242 | { |
6034 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | 6243 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; |
6035 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 6244 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
6036 | struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; | ||
6037 | struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; | 6245 | struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; |
6038 | struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; | 6246 | struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; |
6039 | 6247 | ||
@@ -6050,13 +6258,9 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) | |||
6050 | ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; | 6258 | ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; |
6051 | ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; | 6259 | ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; |
6052 | 6260 | ||
6053 | ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); | 6261 | ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, |
6054 | ioarcb->read_data_transfer_length = | 6262 | sizeof(struct ipr_config_table), |
6055 | cpu_to_be32(sizeof(struct ipr_config_table)); | 6263 | IPR_IOADL_FLAGS_READ_LAST); |
6056 | |||
6057 | ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma); | ||
6058 | ioadl->flags_and_data_len = | ||
6059 | cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table)); | ||
6060 | 6264 | ||
6061 | ipr_cmd->job_step = ipr_init_res_table; | 6265 | ipr_cmd->job_step = ipr_init_res_table; |
6062 | 6266 | ||
@@ -6076,10 +6280,9 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) | |||
6076 | * none | 6280 | * none |
6077 | **/ | 6281 | **/ |
6078 | static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, | 6282 | static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, |
6079 | u32 dma_addr, u8 xfer_len) | 6283 | dma_addr_t dma_addr, u8 xfer_len) |
6080 | { | 6284 | { |
6081 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; | 6285 | struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; |
6082 | struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; | ||
6083 | 6286 | ||
6084 | ENTER; | 6287 | ENTER; |
6085 | ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; | 6288 | ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; |
@@ -6090,12 +6293,7 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, | |||
6090 | ioarcb->cmd_pkt.cdb[2] = page; | 6293 | ioarcb->cmd_pkt.cdb[2] = page; |
6091 | ioarcb->cmd_pkt.cdb[4] = xfer_len; | 6294 | ioarcb->cmd_pkt.cdb[4] = xfer_len; |
6092 | 6295 | ||
6093 | ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); | 6296 | ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); |
6094 | ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); | ||
6095 | |||
6096 | ioadl->address = cpu_to_be32(dma_addr); | ||
6097 | ioadl->flags_and_data_len = | ||
6098 | cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len); | ||
6099 | 6297 | ||
6100 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); | 6298 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); |
6101 | LEAVE; | 6299 | LEAVE; |
@@ -6785,7 +6983,10 @@ static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) | |||
6785 | ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; | 6983 | ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; |
6786 | ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; | 6984 | ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; |
6787 | 6985 | ||
6788 | ipr_build_ucode_ioadl(ipr_cmd, sglist); | 6986 | if (ioa_cfg->sis64) |
6987 | ipr_build_ucode_ioadl64(ipr_cmd, sglist); | ||
6988 | else | ||
6989 | ipr_build_ucode_ioadl(ipr_cmd, sglist); | ||
6789 | ipr_cmd->job_step = ipr_reset_ucode_download_done; | 6990 | ipr_cmd->job_step = ipr_reset_ucode_download_done; |
6790 | 6991 | ||
6791 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, | 6992 | ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, |
@@ -7209,7 +7410,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) | |||
7209 | int i; | 7410 | int i; |
7210 | 7411 | ||
7211 | ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, | 7412 | ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, |
7212 | sizeof(struct ipr_cmnd), 8, 0); | 7413 | sizeof(struct ipr_cmnd), 16, 0); |
7213 | 7414 | ||
7214 | if (!ioa_cfg->ipr_cmd_pool) | 7415 | if (!ioa_cfg->ipr_cmd_pool) |
7215 | return -ENOMEM; | 7416 | return -ENOMEM; |
@@ -7227,13 +7428,25 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) | |||
7227 | ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; | 7428 | ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; |
7228 | 7429 | ||
7229 | ioarcb = &ipr_cmd->ioarcb; | 7430 | ioarcb = &ipr_cmd->ioarcb; |
7230 | ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr); | 7431 | ipr_cmd->dma_addr = dma_addr; |
7432 | if (ioa_cfg->sis64) | ||
7433 | ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); | ||
7434 | else | ||
7435 | ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); | ||
7436 | |||
7231 | ioarcb->host_response_handle = cpu_to_be32(i << 2); | 7437 | ioarcb->host_response_handle = cpu_to_be32(i << 2); |
7232 | ioarcb->write_ioadl_addr = | 7438 | if (ioa_cfg->sis64) { |
7233 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); | 7439 | ioarcb->u.sis64_addr_data.data_ioadl_addr = |
7234 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | 7440 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); |
7235 | ioarcb->ioasa_host_pci_addr = | 7441 | ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = |
7236 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); | 7442 | cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa)); |
7443 | } else { | ||
7444 | ioarcb->write_ioadl_addr = | ||
7445 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); | ||
7446 | ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; | ||
7447 | ioarcb->ioasa_host_pci_addr = | ||
7448 | cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); | ||
7449 | } | ||
7237 | ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); | 7450 | ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); |
7238 | ipr_cmd->cmd_index = i; | 7451 | ipr_cmd->cmd_index = i; |
7239 | ipr_cmd->ioa_cfg = ioa_cfg; | 7452 | ipr_cmd->ioa_cfg = ioa_cfg; |
@@ -7578,6 +7791,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
7578 | goto out_scsi_host_put; | 7791 | goto out_scsi_host_put; |
7579 | } | 7792 | } |
7580 | 7793 | ||
7794 | /* set SIS 32 or SIS 64 */ | ||
7795 | ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; | ||
7581 | ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; | 7796 | ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; |
7582 | 7797 | ||
7583 | if (ipr_transop_timeout) | 7798 | if (ipr_transop_timeout) |
@@ -7615,7 +7830,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, | |||
7615 | 7830 | ||
7616 | pci_set_master(pdev); | 7831 | pci_set_master(pdev); |
7617 | 7832 | ||
7618 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 7833 | if (ioa_cfg->sis64) { |
7834 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
7835 | if (rc < 0) { | ||
7836 | dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n"); | ||
7837 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
7838 | } | ||
7839 | |||
7840 | } else | ||
7841 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
7842 | |||
7619 | if (rc < 0) { | 7843 | if (rc < 0) { |
7620 | dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); | 7844 | dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); |
7621 | goto cleanup_nomem; | 7845 | goto cleanup_nomem; |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 19bbcf39f0c9..64e41df2a196 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -381,7 +381,7 @@ struct ipr_cmd_pkt { | |||
381 | #define IPR_RQTYPE_HCAM 0x02 | 381 | #define IPR_RQTYPE_HCAM 0x02 |
382 | #define IPR_RQTYPE_ATA_PASSTHRU 0x04 | 382 | #define IPR_RQTYPE_ATA_PASSTHRU 0x04 |
383 | 383 | ||
384 | u8 luntar_luntrn; | 384 | u8 reserved2; |
385 | 385 | ||
386 | u8 flags_hi; | 386 | u8 flags_hi; |
387 | #define IPR_FLAGS_HI_WRITE_NOT_READ 0x80 | 387 | #define IPR_FLAGS_HI_WRITE_NOT_READ 0x80 |
@@ -403,7 +403,7 @@ struct ipr_cmd_pkt { | |||
403 | __be16 timeout; | 403 | __be16 timeout; |
404 | }__attribute__ ((packed, aligned(4))); | 404 | }__attribute__ ((packed, aligned(4))); |
405 | 405 | ||
406 | struct ipr_ioarcb_ata_regs { | 406 | struct ipr_ioarcb_ata_regs { /* 22 bytes */ |
407 | u8 flags; | 407 | u8 flags; |
408 | #define IPR_ATA_FLAG_PACKET_CMD 0x80 | 408 | #define IPR_ATA_FLAG_PACKET_CMD 0x80 |
409 | #define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40 | 409 | #define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40 |
@@ -442,28 +442,49 @@ struct ipr_ioadl_desc { | |||
442 | __be32 address; | 442 | __be32 address; |
443 | }__attribute__((packed, aligned (8))); | 443 | }__attribute__((packed, aligned (8))); |
444 | 444 | ||
445 | struct ipr_ioadl64_desc { | ||
446 | __be32 flags; | ||
447 | __be32 data_len; | ||
448 | __be64 address; | ||
449 | }__attribute__((packed, aligned (16))); | ||
450 | |||
451 | struct ipr_ata64_ioadl { | ||
452 | struct ipr_ioarcb_ata_regs regs; | ||
453 | u16 reserved[5]; | ||
454 | struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; | ||
455 | }__attribute__((packed, aligned (16))); | ||
456 | |||
445 | struct ipr_ioarcb_add_data { | 457 | struct ipr_ioarcb_add_data { |
446 | union { | 458 | union { |
447 | struct ipr_ioarcb_ata_regs regs; | 459 | struct ipr_ioarcb_ata_regs regs; |
448 | struct ipr_ioadl_desc ioadl[5]; | 460 | struct ipr_ioadl_desc ioadl[5]; |
449 | __be32 add_cmd_parms[10]; | 461 | __be32 add_cmd_parms[10]; |
450 | }u; | 462 | } u; |
451 | }__attribute__ ((packed, aligned(4))); | 463 | }__attribute__ ((packed, aligned (4))); |
464 | |||
465 | struct ipr_ioarcb_sis64_add_addr_ecb { | ||
466 | __be64 ioasa_host_pci_addr; | ||
467 | __be64 data_ioadl_addr; | ||
468 | __be64 reserved; | ||
469 | __be32 ext_control_buf[4]; | ||
470 | }__attribute__((packed, aligned (8))); | ||
452 | 471 | ||
453 | /* IOA Request Control Block 128 bytes */ | 472 | /* IOA Request Control Block 128 bytes */ |
454 | struct ipr_ioarcb { | 473 | struct ipr_ioarcb { |
455 | __be32 ioarcb_host_pci_addr; | 474 | union { |
456 | __be32 reserved; | 475 | __be32 ioarcb_host_pci_addr; |
476 | __be64 ioarcb_host_pci_addr64; | ||
477 | } a; | ||
457 | __be32 res_handle; | 478 | __be32 res_handle; |
458 | __be32 host_response_handle; | 479 | __be32 host_response_handle; |
459 | __be32 reserved1; | 480 | __be32 reserved1; |
460 | __be32 reserved2; | 481 | __be32 reserved2; |
461 | __be32 reserved3; | 482 | __be32 reserved3; |
462 | 483 | ||
463 | __be32 write_data_transfer_length; | 484 | __be32 data_transfer_length; |
464 | __be32 read_data_transfer_length; | 485 | __be32 read_data_transfer_length; |
465 | __be32 write_ioadl_addr; | 486 | __be32 write_ioadl_addr; |
466 | __be32 write_ioadl_len; | 487 | __be32 ioadl_len; |
467 | __be32 read_ioadl_addr; | 488 | __be32 read_ioadl_addr; |
468 | __be32 read_ioadl_len; | 489 | __be32 read_ioadl_len; |
469 | 490 | ||
@@ -473,8 +494,14 @@ struct ipr_ioarcb { | |||
473 | 494 | ||
474 | struct ipr_cmd_pkt cmd_pkt; | 495 | struct ipr_cmd_pkt cmd_pkt; |
475 | 496 | ||
476 | __be32 add_cmd_parms_len; | 497 | __be16 add_cmd_parms_offset; |
477 | struct ipr_ioarcb_add_data add_data; | 498 | __be16 add_cmd_parms_len; |
499 | |||
500 | union { | ||
501 | struct ipr_ioarcb_add_data add_data; | ||
502 | struct ipr_ioarcb_sis64_add_addr_ecb sis64_addr_data; | ||
503 | } u; | ||
504 | |||
478 | }__attribute__((packed, aligned (4))); | 505 | }__attribute__((packed, aligned (4))); |
479 | 506 | ||
480 | struct ipr_ioasa_vset { | 507 | struct ipr_ioasa_vset { |
@@ -1029,6 +1056,9 @@ struct ipr_chip_t { | |||
1029 | u16 intr_type; | 1056 | u16 intr_type; |
1030 | #define IPR_USE_LSI 0x00 | 1057 | #define IPR_USE_LSI 0x00 |
1031 | #define IPR_USE_MSI 0x01 | 1058 | #define IPR_USE_MSI 0x01 |
1059 | u16 sis_type; | ||
1060 | #define IPR_SIS32 0x00 | ||
1061 | #define IPR_SIS64 0x01 | ||
1032 | const struct ipr_chip_cfg_t *cfg; | 1062 | const struct ipr_chip_cfg_t *cfg; |
1033 | }; | 1063 | }; |
1034 | 1064 | ||
@@ -1099,6 +1129,7 @@ struct ipr_ioa_cfg { | |||
1099 | u8 dual_raid:1; | 1129 | u8 dual_raid:1; |
1100 | u8 needs_warm_reset:1; | 1130 | u8 needs_warm_reset:1; |
1101 | u8 msi_received:1; | 1131 | u8 msi_received:1; |
1132 | u8 sis64:1; | ||
1102 | 1133 | ||
1103 | u8 revid; | 1134 | u8 revid; |
1104 | 1135 | ||
@@ -1202,13 +1233,17 @@ struct ipr_ioa_cfg { | |||
1202 | char ipr_cmd_label[8]; | 1233 | char ipr_cmd_label[8]; |
1203 | #define IPR_CMD_LABEL "ipr_cmd" | 1234 | #define IPR_CMD_LABEL "ipr_cmd" |
1204 | struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; | 1235 | struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; |
1205 | u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; | 1236 | dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; |
1206 | }; | 1237 | }; |
1207 | 1238 | ||
1208 | struct ipr_cmnd { | 1239 | struct ipr_cmnd { |
1209 | struct ipr_ioarcb ioarcb; | 1240 | struct ipr_ioarcb ioarcb; |
1241 | union { | ||
1242 | struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; | ||
1243 | struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; | ||
1244 | struct ipr_ata64_ioadl ata_ioadl; | ||
1245 | } i; | ||
1210 | struct ipr_ioasa ioasa; | 1246 | struct ipr_ioasa ioasa; |
1211 | struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; | ||
1212 | struct list_head queue; | 1247 | struct list_head queue; |
1213 | struct scsi_cmnd *scsi_cmd; | 1248 | struct scsi_cmnd *scsi_cmd; |
1214 | struct ata_queued_cmd *qc; | 1249 | struct ata_queued_cmd *qc; |
@@ -1221,7 +1256,7 @@ struct ipr_cmnd { | |||
1221 | u8 sense_buffer[SCSI_SENSE_BUFFERSIZE]; | 1256 | u8 sense_buffer[SCSI_SENSE_BUFFERSIZE]; |
1222 | dma_addr_t sense_buffer_dma; | 1257 | dma_addr_t sense_buffer_dma; |
1223 | unsigned short dma_use_sg; | 1258 | unsigned short dma_use_sg; |
1224 | dma_addr_t dma_handle; | 1259 | dma_addr_t dma_addr; |
1225 | struct ipr_cmnd *sibling; | 1260 | struct ipr_cmnd *sibling; |
1226 | union { | 1261 | union { |
1227 | enum ipr_shutdown_type shutdown_type; | 1262 | enum ipr_shutdown_type shutdown_type; |