author	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-21 14:51:42 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-21 14:51:42 -0500
commit	cdc194705d26fdd7fc5446b5d830f2bbe2b22c30
tree	91a643f38d490e092855792576a7e903a419cfe1
parent	772c8f6f3bbd3ceb94a89373473083e3e1113554
parent	d1da522fb8a70b8c527d4ad15f9e62218cc00f2c
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This update includes the usual round of major driver updates (ncr5380,
  ufs, lpfc, be2iscsi, hisi_sas, storvsc, cxlflash, aacraid,
  megaraid_sas, ...).

  There's also an assortment of minor fixes and the major update of
  switching a bunch of drivers to pci_alloc_irq_vectors from Christoph"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (188 commits)
  scsi: megaraid_sas: handle dma_addr_t right on 32-bit
  scsi: megaraid_sas: array overflow in megasas_dump_frame()
  scsi: snic: switch to pci_irq_alloc_vectors
  scsi: megaraid_sas: driver version upgrade
  scsi: megaraid_sas: Change RAID_1_10_RMW_CMDS to RAID_1_PEER_CMDS and set value to 2
  scsi: megaraid_sas: Indentation and smatch warning fixes
  scsi: megaraid_sas: Cleanup VD_EXT_DEBUG and SPAN_DEBUG related debug prints
  scsi: megaraid_sas: Increase internal command pool
  scsi: megaraid_sas: Use synchronize_irq to wait for IRQs to complete
  scsi: megaraid_sas: Bail out the driver load if ld_list_query fails
  scsi: megaraid_sas: Change build_mpt_mfi_pass_thru to return void
  scsi: megaraid_sas: During OCR, if get_ctrl_info fails do not continue with OCR
  scsi: megaraid_sas: Do not set fp_possible if TM capable for non-RW syspdIO, change fp_possible to bool
  scsi: megaraid_sas: Remove unused pd_index from megasas_build_ld_nonrw_fusion
  scsi: megaraid_sas: megasas_return_cmd does not memset IO frame to zero
  scsi: megaraid_sas: max_fw_cmds are decremented twice, remove duplicate
  scsi: megaraid_sas: update can_queue only if the new value is less
  scsi: megaraid_sas: Change max_cmd from u32 to u16 in all functions
  scsi: megaraid_sas: set pd_after_lb from MR_BuildRaidContext and initialize pDevHandle to MR_DEVHANDLE_INVALID
  scsi: megaraid_sas: latest controller OCR capability from FW before sending shutdown DCMD
  ...
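[Editorial note: the pci_alloc_irq_vectors conversion called out above replaces each driver's hand-rolled MSI-X -> MSI -> legacy INTx fallback ladder with a single PCI core call. A minimal, hypothetical sketch of the pattern follows; the foo_* names and FOO_MAX_VECTORS are illustrative placeholders, not code from any driver in this series.]

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	#define FOO_MAX_VECTORS	16

	struct foo_hw {
		void *queue[FOO_MAX_VECTORS];	/* per-vector context cookies */
		int num_vectors;
	};

	static irqreturn_t foo_isr(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int foo_setup_irqs(struct pci_dev *pdev, struct foo_hw *hw)
	{
		int i, nvec, ret;

		/*
		 * One call replaces the old per-driver ladder of
		 * pci_enable_msix_range() -> pci_enable_msi() -> INTx;
		 * the PCI core tries MSI-X, then MSI, then legacy INTx.
		 */
		nvec = pci_alloc_irq_vectors(pdev, 1, FOO_MAX_VECTORS,
					     PCI_IRQ_ALL_TYPES);
		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			/* pci_irq_vector() maps a vector index to a Linux IRQ */
			ret = request_irq(pci_irq_vector(pdev, i), foo_isr, 0,
					  "foo", &hw->queue[i]);
			if (ret)
				goto err_free;
		}
		hw->num_vectors = nvec;
		return 0;

	err_free:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), &hw->queue[i]);
		pci_free_irq_vectors(pdev);
		return ret;
	}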
-rw-r--r--drivers/ata/libata-eh.c1
-rw-r--r--drivers/ata/libata-transport.c1
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/block/cciss.h30
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c1
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c1
-rw-r--r--drivers/message/fusion/mptfc.c1
-rw-r--r--drivers/message/fusion/mptlan.h1
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c1
-rw-r--r--drivers/scsi/NCR5380.c64
-rw-r--r--drivers/scsi/NCR5380.h17
-rw-r--r--drivers/scsi/aacraid/aachba.c1288
-rw-r--r--drivers/scsi/aacraid/aacraid.h644
-rw-r--r--drivers/scsi/aacraid/commctrl.c342
-rw-r--r--drivers/scsi/aacraid/comminit.c330
-rw-r--r--drivers/scsi/aacraid/commsup.c964
-rw-r--r--drivers/scsi/aacraid/dpcsup.c159
-rw-r--r--drivers/scsi/aacraid/linit.c562
-rw-r--r--drivers/scsi/aacraid/nark.c3
-rw-r--r--drivers/scsi/aacraid/rkt.c5
-rw-r--r--drivers/scsi/aacraid/rx.c17
-rw-r--r--drivers/scsi/aacraid/sa.c9
-rw-r--r--drivers/scsi/aacraid/src.c336
-rw-r--r--drivers/scsi/atari_scsi.c36
-rw-r--r--drivers/scsi/be2iscsi/be.h3
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c41
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h17
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c165
-rw-r--r--drivers/scsi/be2iscsi/be_main.c345
-rw-r--r--drivers/scsi/be2iscsi/be_main.h44
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c117
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h98
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c181
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h4
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c1
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1
-rw-r--r--drivers/scsi/cxlflash/common.h32
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c31
-rw-r--r--drivers/scsi/cxlflash/main.c465
-rw-r--r--drivers/scsi/cxlflash/sislite.h19
-rw-r--r--drivers/scsi/cxlflash/superpipe.c183
-rw-r--r--drivers/scsi/cxlflash/vlun.c169
-rw-r--r--drivers/scsi/dpt_i2o.c8
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_log.h4
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c4
-rw-r--r--drivers/scsi/fcoe/fcoe.c1
-rw-r--r--drivers/scsi/fnic/fnic_main.c1
-rw-r--r--drivers/scsi/g_NCR5380.c45
-rw-r--r--drivers/scsi/g_NCR5380.h56
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c23
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c135
-rw-r--r--drivers/scsi/hpsa.c8
-rw-r--r--drivers/scsi/hpsa.h40
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c1
-rw-r--r--drivers/scsi/iscsi_tcp.c1
-rw-r--r--drivers/scsi/libiscsi.c5
-rw-r--r--drivers/scsi/libsas/sas_init.c1
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c7
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c61
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c8
-rw-r--r--drivers/scsi/mac_scsi.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h199
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c648
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c468
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c1334
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h412
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c20
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c56
-rw-r--r--drivers/scsi/mvumi.c6
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c35
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pmcraid.c92
-rw-r--r--drivers/scsi/pmcraid.h1
-rw-r--r--drivers/scsi/qedi/qedi_dbg.c9
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c6
-rw-r--r--drivers/scsi/scsi_debug.c10
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/scsi_transport_fc.c26
-rw-r--r--drivers/scsi/scsi_transport_srp.c21
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/scsi/snic/snic.h1
-rw-r--r--drivers/scsi/snic/snic_isr.c48
-rw-r--r--drivers/scsi/storvsc_drv.c160
-rw-r--r--drivers/scsi/sun3_scsi.c83
-rw-r--r--drivers/scsi/sun3_scsi.h102
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c49
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h1
-rw-r--r--drivers/scsi/ufs/ufs.h12
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h28
-rw-r--r--drivers/scsi/ufs/ufshcd.c1578
-rw-r--r--drivers/scsi/ufs/ufshcd.h121
-rw-r--r--drivers/scsi/ufs/ufshci.h3
-rw-r--r--drivers/scsi/vmw_pvscsi.c104
-rw-r--r--drivers/scsi/vmw_pvscsi.h5
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/scsi/libiscsi.h1
-rw-r--r--include/scsi/scsi.h10
-rw-r--r--include/scsi/scsi_transport.h23
-rw-r--r--include/scsi/scsi_transport_fc.h1
-rw-r--r--include/scsi/scsi_transport_srp.h8
-rw-r--r--include/trace/events/ufs.h263
-rw-r--r--include/uapi/scsi/cxlflash_ioctl.h1
129 files changed, 9294 insertions, 3928 deletions
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 0e1ec37070d1..50ee10db160f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -549,6 +549,7 @@ enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 	DPRINTK("EXIT, ret=%d\n", ret);
 	return ret;
 }
+EXPORT_SYMBOL(ata_scsi_timed_out);
 
 static void ata_eh_unload(struct ata_port *ap)
 {
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 7ef16c085058..46698232e6bf 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -716,7 +716,6 @@ struct scsi_transport_template *ata_attach_transport(void)
 		return NULL;
 
 	i->t.eh_strategy_handler = ata_scsi_error;
-	i->t.eh_timed_out = ata_scsi_timed_out;
 	i->t.user_scan = ata_scsi_user_scan;
 
 	i->t.host_attrs.ac.attrs = &i->port_attrs[0];
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 8f3a5596dd67..06d479d1f302 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -159,7 +159,6 @@ extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
 extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
 extern void ata_eh_acquire(struct ata_port *ap);
 extern void ata_eh_release(struct ata_port *ap);
-extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
 extern void ata_eh_fastdrain_timerfn(unsigned long arg);
 extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 4affa94ca17b..24b5fd75501a 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -400,27 +400,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
 }
 
 static struct access_method SA5_access = {
-	SA5_submit_command,
-	SA5_intr_mask,
-	SA5_fifo_full,
-	SA5_intr_pending,
-	SA5_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5_intr_mask,
+	.fifo_full = SA5_fifo_full,
+	.intr_pending = SA5_intr_pending,
+	.command_completed = SA5_completed,
 };
 
 static struct access_method SA5B_access = {
-	SA5_submit_command,
-	SA5B_intr_mask,
-	SA5_fifo_full,
-	SA5B_intr_pending,
-	SA5_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5B_intr_mask,
+	.fifo_full = SA5_fifo_full,
+	.intr_pending = SA5B_intr_pending,
+	.command_completed = SA5_completed,
 };
 
 static struct access_method SA5_performant_access = {
-	SA5_submit_command,
-	SA5_performant_intr_mask,
-	SA5_fifo_full,
-	SA5_performant_intr_pending,
-	SA5_performant_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5_performant_intr_mask,
+	.fifo_full = SA5_fifo_full,
+	.intr_pending = SA5_performant_intr_pending,
+	.command_completed = SA5_performant_completed,
 };
 
 struct board_type {
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index e71af717e71b..30a6985909e0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -994,6 +994,7 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.change_queue_depth	= scsi_change_queue_depth,
 	.sg_tablesize		= ISCSI_ISER_DEF_SG_TABLESIZE,
 	.cmd_per_lun		= ISER_DEF_CMD_PER_LUN,
+	.eh_timed_out		= iscsi_eh_cmd_timed_out,
 	.eh_abort_handler	= iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 79bf48477ddb..36529e390e48 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2869,6 +2869,7 @@ static struct scsi_host_template srp_template = {
 	.info			= srp_target_info,
 	.queuecommand		= srp_queuecommand,
 	.change_queue_depth	= srp_change_queue_depth,
+	.eh_timed_out		= srp_timed_out,
 	.eh_abort_handler	= srp_abort,
 	.eh_device_reset_handler = srp_reset_device,
 	.eh_host_reset_handler	= srp_reset_host,
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index add6a3a6ef0d..98eafae78576 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -119,6 +119,7 @@ static struct scsi_host_template mptfc_driver_template = {
 	.target_destroy		= mptfc_target_destroy,
 	.slave_destroy		= mptscsih_slave_destroy,
 	.change_queue_depth	= mptscsih_change_queue_depth,
+	.eh_timed_out		= fc_eh_timed_out,
 	.eh_abort_handler	= mptfc_abort,
 	.eh_device_reset_handler = mptfc_dev_reset,
 	.eh_bus_reset_handler	= mptfc_bus_reset,
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index 8946e19dbfc8..8a24494f8c4d 100644
--- a/drivers/message/fusion/mptlan.h
+++ b/drivers/message/fusion/mptlan.h
@@ -65,7 +65,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include <linux/miscdevice.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b8c4b2ba7519..f6308ad35b19 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1983,6 +1983,7 @@ static struct scsi_host_template mptsas_driver_template = {
 	.target_destroy		= mptsas_target_destroy,
 	.slave_destroy		= mptscsih_slave_destroy,
 	.change_queue_depth	= mptscsih_change_queue_depth,
+	.eh_timed_out		= mptsas_eh_timed_out,
 	.eh_abort_handler	= mptscsih_abort,
 	.eh_device_reset_handler = mptscsih_dev_reset,
 	.eh_host_reset_handler	= mptscsih_host_reset,
@@ -5398,7 +5399,6 @@ mptsas_init(void)
 		sas_attach_transport(&mptsas_transport_functions);
 	if (!mptsas_transport_template)
 		return -ENODEV;
-	mptsas_transport_template->eh_timed_out = mptsas_eh_timed_out;
 
 	mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER,
 	    "mptscsih_io_done");
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 07ffdbb5107f..0678cf714c0e 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -330,6 +330,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
 	.module			 = THIS_MODULE,
 	.name			 = "zfcp",
 	.queuecommand		 = zfcp_scsi_queuecommand,
+	.eh_timed_out		 = fc_eh_timed_out,
 	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
 	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
 	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 4f5ca794bb71..acc33440bca0 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -96,17 +96,6 @@
  * of chips.  To use it, you write an architecture specific functions
  * and macros and include this file in your driver.
  *
- * These macros control options :
- * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
- *	for commands that return with a CHECK CONDITION status.
- *
- * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
- *	transceivers.
- *
- * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases.
- *
- * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
- *
  * These macros MUST be defined :
  *
  * NCR5380_read(register)  - read from the specified register
@@ -347,7 +336,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
 #endif
 
 /**
- * NCR58380_info - report driver and host information
+ * NCR5380_info - report driver and host information
  * @instance: relevant scsi host instance
  *
  * For use as the host template info() handler.
@@ -360,33 +349,6 @@ static const char *NCR5380_info(struct Scsi_Host *instance)
 	return hostdata->info;
 }
 
-static void prepare_info(struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	snprintf(hostdata->info, sizeof(hostdata->info),
-	         "%s, irq %d, "
-	         "io_port 0x%lx, base 0x%lx, "
-	         "can_queue %d, cmd_per_lun %d, "
-	         "sg_tablesize %d, this_id %d, "
-	         "flags { %s%s%s}, "
-	         "options { %s} ",
-	         instance->hostt->name, instance->irq,
-	         hostdata->io_port, hostdata->base,
-	         instance->can_queue, instance->cmd_per_lun,
-	         instance->sg_tablesize, instance->this_id,
-	         hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "",
-	         hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
-	         hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "",
-#ifdef DIFFERENTIAL
-	         "DIFFERENTIAL "
-#endif
-#ifdef PARITY
-	         "PARITY "
-#endif
-	         "");
-}
-
 /**
  * NCR5380_init - initialise an NCR5380
  * @instance: adapter to configure
@@ -436,7 +398,14 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
 	if (!hostdata->work_q)
 		return -ENOMEM;
 
-	prepare_info(instance);
+	snprintf(hostdata->info, sizeof(hostdata->info),
+		"%s, irq %d, io_port 0x%lx, base 0x%lx, can_queue %d, cmd_per_lun %d, sg_tablesize %d, this_id %d, flags { %s%s%s}",
+		instance->hostt->name, instance->irq, hostdata->io_port,
+		hostdata->base, instance->can_queue, instance->cmd_per_lun,
+		instance->sg_tablesize, instance->this_id,
+		hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "",
+		hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
+		hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "");
 
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 	NCR5380_write(MODE_REG, MR_BASE);
@@ -622,8 +591,9 @@ static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
 	    list_empty(&hostdata->unissued) &&
 	    list_empty(&hostdata->autosense) &&
 	    !hostdata->connected &&
-	    !hostdata->selecting)
+	    !hostdata->selecting) {
 		NCR5380_release_dma_irq(instance);
+	}
 }
628 598
629/** 599/**
@@ -962,6 +932,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
 
 static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
                                         struct scsi_cmnd *cmd)
+	__releases(&hostdata->lock) __acquires(&hostdata->lock)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char tmp[3], phase;
@@ -1194,8 +1165,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 		data = tmp;
 		phase = PHASE_MSGOUT;
 		NCR5380_transfer_pio(instance, &phase, &len, &data);
+		if (len) {
+			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+			cmd->result = DID_ERROR << 16;
+			complete_cmd(instance, cmd);
+			dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n");
+			cmd = NULL;
+			goto out;
+		}
+
 	dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n");
-	/* XXX need to handle errors here */
 
 	hostdata->connected = cmd;
 	hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun;
@@ -1654,6 +1633,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
  */
 
 static void NCR5380_information_transfer(struct Scsi_Host *instance)
+	__releases(&hostdata->lock) __acquires(&hostdata->lock)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char msgout = NOP;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 51a3567a6fb2..d78f0957d865 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -81,11 +81,7 @@
 #define ICR_ASSERT_ATN		0x02	/* rw Set to assert ATN */
 #define ICR_ASSERT_DATA		0x01	/* rw SCSI_DATA_REG is asserted */
 
-#ifdef DIFFERENTIAL
-#define ICR_BASE		ICR_DIFF_ENABLE
-#else
 #define ICR_BASE		0
-#endif
 
 #define MODE_REG		2
 /*
@@ -102,11 +98,7 @@
 #define MR_DMA_MODE		0x02	/* rw DMA / pseudo DMA mode */
 #define MR_ARBITRATE		0x01	/* rw start arbitration */
 
-#ifdef PARITY
-#define MR_BASE			MR_ENABLE_PAR_CHECK
-#else
 #define MR_BASE			0
-#endif
 
 #define TARGET_COMMAND_REG	3
 #define TCR_LAST_BYTE_SENT	0x80	/* ro DMA done */
@@ -174,11 +166,7 @@
 #define CSR_SCSI_BUF_RDY	0x02	/* ro SCSI buffer read */
 #define CSR_GATED_53C80_IRQ	0x01	/* ro Last block xferred */
 
-#if 0
-#define CSR_BASE CSR_SCSI_BUFF_INTR | CSR_53C80_INTR
-#else
 #define CSR_BASE CSR_53C80_INTR
-#endif
 
 /* Note : PHASE_* macros are based on the values of the STATUS register */
 #define PHASE_MASK	(SR_MSG | SR_CD | SR_IO)
@@ -234,11 +222,9 @@ struct NCR5380_hostdata {
 	unsigned char id_higher_mask;	/* All bits above id_mask */
 	unsigned char last_message;	/* Last Message Out */
 	unsigned long region_size;	/* Size of address/port range */
-	char info[256];
+	char info[168];			/* Host banner message */
 };
 
-#ifdef __KERNEL__
-
 struct NCR5380_cmd {
 	struct list_head list;
 };
@@ -331,5 +317,4 @@ static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata)
 	return 0;
 }
 
-#endif /* __KERNEL__ */
 #endif /* NCR5380_H */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 1ee7c654f7b8..907f1e80665b 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -6,7 +6,8 @@
  *	Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -22,6 +23,11 @@
  * along with this program; see the file COPYING.  If not, write to
  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  *
+ * Module Name:
+ *  aachba.c
+ *
+ * Abstract: Contains Interfaces to manage IOs.
+ *
  */
 
 #include <linux/kernel.h>
@@ -62,6 +68,7 @@
 #define SENCODE_END_OF_DATA			0x00
 #define SENCODE_BECOMING_READY			0x04
 #define SENCODE_INIT_CMD_REQUIRED		0x04
+#define SENCODE_UNRECOVERED_READ_ERROR		0x11
 #define SENCODE_PARAM_LIST_LENGTH_ERROR		0x1A
 #define SENCODE_INVALID_COMMAND			0x20
 #define SENCODE_LBA_OUT_OF_RANGE		0x21
@@ -106,6 +113,8 @@
 #define ASENCODE_LUN_FAILED_SELF_CONFIG		0x00
 #define ASENCODE_OVERLAPPED_COMMAND		0x00
 
+#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)
+
 #define BYTE0(x) (unsigned char)(x)
 #define BYTE1(x) (unsigned char)((x) >> 8)
 #define BYTE2(x) (unsigned char)((x) >> 16)
@@ -164,46 +173,56 @@ struct inquiry_data {
 };
 
 /* Added for VPD 0x83 */
-typedef struct {
-	u8 CodeSet:4;	/* VPD_CODE_SET */
-	u8 Reserved:4;
-	u8 IdentifierType:4;	/* VPD_IDENTIFIER_TYPE */
-	u8 Reserved2:4;
-	u8 Reserved3;
-	u8 IdentifierLength;
-	u8 VendId[8];
-	u8 ProductId[16];
-	u8 SerialNumber[8];	/* SN in ASCII */
-
-} TVPD_ID_Descriptor_Type_1;
+struct tvpd_id_descriptor_type_1 {
+	u8 codeset:4;		/* VPD_CODE_SET */
+	u8 reserved:4;
+	u8 identifiertype:4;	/* VPD_IDENTIFIER_TYPE */
+	u8 reserved2:4;
+	u8 reserved3;
+	u8 identifierlength;
+	u8 venid[8];
+	u8 productid[16];
+	u8 serialnumber[8];	/* SN in ASCII */
+
+};
 
-typedef struct {
-	u8 CodeSet:4;	/* VPD_CODE_SET */
-	u8 Reserved:4;
-	u8 IdentifierType:4;	/* VPD_IDENTIFIER_TYPE */
-	u8 Reserved2:4;
-	u8 Reserved3;
-	u8 IdentifierLength;
-	struct TEU64Id {
+struct tvpd_id_descriptor_type_2 {
+	u8 codeset:4;		/* VPD_CODE_SET */
+	u8 reserved:4;
+	u8 identifiertype:4;	/* VPD_IDENTIFIER_TYPE */
+	u8 reserved2:4;
+	u8 reserved3;
+	u8 identifierlength;
+	struct teu64id {
 		u32 Serial;
 		/* The serial number supposed to be 40 bits,
 		 * bit we only support 32, so make the last byte zero. */
-		u8 Reserved;
-		u8 VendId[3];
-	} EU64Id;
+		u8 reserved;
+		u8 venid[3];
+	} eu64id;
 
-} TVPD_ID_Descriptor_Type_2;
+};
 
-typedef struct {
+struct tvpd_id_descriptor_type_3 {
+	u8 codeset : 4;		/* VPD_CODE_SET */
+	u8 reserved : 4;
+	u8 identifiertype : 4;	/* VPD_IDENTIFIER_TYPE */
+	u8 reserved2 : 4;
+	u8 reserved3;
+	u8 identifierlength;
+	u8 Identifier[16];
+};
+
+struct tvpd_page83 {
 	u8 DeviceType:5;
 	u8 DeviceTypeQualifier:3;
 	u8 PageCode;
-	u8 Reserved;
+	u8 reserved;
 	u8 PageLength;
-	TVPD_ID_Descriptor_Type_1 IdDescriptorType1;
-	TVPD_ID_Descriptor_Type_2 IdDescriptorType2;
-
-} TVPD_Page83;
+	struct tvpd_id_descriptor_type_1 type1;
+	struct tvpd_id_descriptor_type_2 type2;
+	struct tvpd_id_descriptor_type_3 type3;
+};
 
 /*
  *	M O D U L E   G L O B A L S
@@ -214,9 +233,13 @@ static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
 static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
 static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
 				struct aac_raw_io2 *rio2, int sg_max);
+static long aac_build_sghba(struct scsi_cmnd *scsicmd,
+				struct aac_hba_cmd_req *hbacmd,
+				int sg_max, u64 sg_address);
 static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
 				int pages, int nseg, int nseg_new);
 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
+static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
 #ifdef AAC_DETAILED_STATUS_INFO
 static char *aac_get_status_string(u32 status);
 #endif
@@ -327,7 +350,7 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
 	}
 	scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
 	device = scsicmd->device;
-	if (unlikely(!device || !scsi_device_online(device))) {
+	if (unlikely(!device)) {
 		dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
 		aac_fib_complete(fibptr);
 		return 0;
@@ -473,16 +496,26 @@ int aac_get_containers(struct aac_dev *dev)
 
 	if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
 		maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
-	fsa_dev_ptr = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
-			GFP_KERNEL);
-	if (!fsa_dev_ptr)
-		return -ENOMEM;
+	if (dev->fsa_dev == NULL ||
+	    dev->maximum_num_containers != maximum_num_containers) {
+
+		fsa_dev_ptr = dev->fsa_dev;
+
+		dev->fsa_dev = kcalloc(maximum_num_containers,
+				       sizeof(*fsa_dev_ptr), GFP_KERNEL);
+
+		kfree(fsa_dev_ptr);
+		fsa_dev_ptr = NULL;
 
-	dev->fsa_dev = fsa_dev_ptr;
-	dev->maximum_num_containers = maximum_num_containers;
 
-	for (index = 0; index < dev->maximum_num_containers; ) {
-		fsa_dev_ptr[index].devname[0] = '\0';
+		if (!dev->fsa_dev)
+			return -ENOMEM;
+
+		dev->maximum_num_containers = maximum_num_containers;
+	}
+	for (index = 0; index < dev->maximum_num_containers; index++) {
+		dev->fsa_dev[index].devname[0] = '\0';
+		dev->fsa_dev[index].valid = 0;
 
 		status = aac_probe_container(dev, index);
 
@@ -490,12 +523,6 @@ int aac_get_containers(struct aac_dev *dev)
 			printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
 			break;
 		}
-
-		/*
-		 * If there are no more containers, then stop asking.
-		 */
-		if (++index >= status)
-			break;
 	}
 	return status;
 }
@@ -602,6 +629,7 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
 	struct fsa_dev_info *fsa_dev_ptr;
 	int (*callback)(struct scsi_cmnd *);
 	struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
+	int i;
 
 
 	if (!aac_valid_context(scsicmd, fibptr))
@@ -624,6 +652,10 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
 		fsa_dev_ptr->block_size =
 			le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
 	}
+	for (i = 0; i < 16; i++)
+		fsa_dev_ptr->identifier[i] =
+			dresp->mnt[0].fileinfo.bdevinfo
+				.identifier[i];
 	fsa_dev_ptr->valid = 1;
 	/* sense_key holds the current state of the spin-up */
 	if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
@@ -918,6 +950,28 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 	inqstrcpy ("V1.0", str->prl);
 }
 
+static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
+		struct aac_dev *dev, struct scsi_cmnd *scsicmd)
+{
+	int container;
+
+	vpdpage83data->type3.codeset = 1;
+	vpdpage83data->type3.identifiertype = 3;
+	vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
+							- 4;
+
+	for (container = 0; container < dev->maximum_num_containers;
+						container++) {
+
+		if (scmd_id(scsicmd) == container) {
+			memcpy(vpdpage83data->type3.Identifier,
+				dev->fsa_dev[container].identifier,
+				16);
+			break;
+		}
+	}
+}
+
 static void get_container_serial_callback(void *context, struct fib * fibptr)
 {
 	struct aac_get_serial_resp * get_serial_reply;
@@ -935,39 +989,47 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
 	/*Check to see if it's for VPD 0x83 or 0x80 */
 	if (scsicmd->cmnd[2] == 0x83) {
 		/* vpd page 0x83 - Device Identification Page */
+		struct aac_dev *dev;
 		int i;
-		TVPD_Page83 VPDPage83Data;
+		struct tvpd_page83 vpdpage83data;
+
+		dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
-		memset(((u8 *)&VPDPage83Data), 0,
-		       sizeof(VPDPage83Data));
+		memset(((u8 *)&vpdpage83data), 0,
+		       sizeof(vpdpage83data));
 
 		/* DIRECT_ACCESS_DEVIC */
-		VPDPage83Data.DeviceType = 0;
+		vpdpage83data.DeviceType = 0;
 		/* DEVICE_CONNECTED */
-		VPDPage83Data.DeviceTypeQualifier = 0;
+		vpdpage83data.DeviceTypeQualifier = 0;
 		/* VPD_DEVICE_IDENTIFIERS */
-		VPDPage83Data.PageCode = 0x83;
-		VPDPage83Data.Reserved = 0;
-		VPDPage83Data.PageLength =
-			sizeof(VPDPage83Data.IdDescriptorType1) +
-			sizeof(VPDPage83Data.IdDescriptorType2);
+		vpdpage83data.PageCode = 0x83;
+		vpdpage83data.reserved = 0;
+		vpdpage83data.PageLength =
+			sizeof(vpdpage83data.type1) +
+			sizeof(vpdpage83data.type2);
+
+		/* VPD 83 Type 3 is not supported for ARC */
+		if (dev->sa_firmware)
+			vpdpage83data.PageLength +=
+				sizeof(vpdpage83data.type3);
 
 		/* T10 Vendor Identifier Field Format */
-		/* VpdCodeSetAscii */
-		VPDPage83Data.IdDescriptorType1.CodeSet = 2;
+		/* VpdcodesetAscii */
+		vpdpage83data.type1.codeset = 2;
 		/* VpdIdentifierTypeVendorId */
-		VPDPage83Data.IdDescriptorType1.IdentifierType = 1;
-		VPDPage83Data.IdDescriptorType1.IdentifierLength =
-			sizeof(VPDPage83Data.IdDescriptorType1) - 4;
+		vpdpage83data.type1.identifiertype = 1;
+		vpdpage83data.type1.identifierlength =
+			sizeof(vpdpage83data.type1) - 4;
 
 		/* "ADAPTEC " for adaptec */
-		memcpy(VPDPage83Data.IdDescriptorType1.VendId,
+		memcpy(vpdpage83data.type1.venid,
 		       "ADAPTEC ",
-		       sizeof(VPDPage83Data.IdDescriptorType1.VendId));
-		memcpy(VPDPage83Data.IdDescriptorType1.ProductId,
+		       sizeof(vpdpage83data.type1.venid));
+		memcpy(vpdpage83data.type1.productid,
 		       "ARRAY   ",
 		       sizeof(
-			       VPDPage83Data.IdDescriptorType1.ProductId));
+			       vpdpage83data.type1.productid));
 
 		/* Convert to ascii based serial number.
 		 * The LSB is the the end.
@@ -976,32 +1038,41 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
 			u8 temp =
 				(u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
 			if (temp > 0x9) {
-				VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+				vpdpage83data.type1.serialnumber[i] =
 					'A' + (temp - 0xA);
 			} else {
-				VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+				vpdpage83data.type1.serialnumber[i] =
 					'0' + temp;
 			}
 		}
 
 		/* VpdCodeSetBinary */
-		VPDPage83Data.IdDescriptorType2.CodeSet = 1;
-		/* VpdIdentifierTypeEUI64 */
-		VPDPage83Data.IdDescriptorType2.IdentifierType = 2;
-		VPDPage83Data.IdDescriptorType2.IdentifierLength =
-			sizeof(VPDPage83Data.IdDescriptorType2) - 4;
+		vpdpage83data.type2.codeset = 1;
+		/* VpdidentifiertypeEUI64 */
+		vpdpage83data.type2.identifiertype = 2;
+		vpdpage83data.type2.identifierlength =
+			sizeof(vpdpage83data.type2) - 4;
 
-		VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0;
-		VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0;
-		VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0;
+		vpdpage83data.type2.eu64id.venid[0] = 0xD0;
+		vpdpage83data.type2.eu64id.venid[1] = 0;
+		vpdpage83data.type2.eu64id.venid[2] = 0;
 
-		VPDPage83Data.IdDescriptorType2.EU64Id.Serial =
+		vpdpage83data.type2.eu64id.Serial =
 			get_serial_reply->uid;
-		VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0;
+		vpdpage83data.type2.eu64id.reserved = 0;
+
+		/*
+		 * VpdIdentifierTypeFCPHName
+		 * VPD 0x83 Type 3 not supported for ARC
+		 */
+		if (dev->sa_firmware) {
+			build_vpd83_type3(&vpdpage83data,
+				dev, scsicmd);
+		}
 
 		/* Move the inquiry data to the response buffer. */
-		scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data,
-					 sizeof(VPDPage83Data));
+		scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
+					 sizeof(vpdpage83data));
 	} else {
 		/* It must be for VPD 0x80 */
 		char sp[13];
@@ -1144,7 +1215,9 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
 	long ret;
 
 	aac_fib_init(fib);
-	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
+		!dev->sync_mode) {
 		struct aac_raw_io2 *readcmd2;
 		readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
 		memset(readcmd2, 0, sizeof(struct aac_raw_io2));
@@ -1270,7 +1343,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
 	long ret;
 
 	aac_fib_init(fib);
-	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
+		!dev->sync_mode) {
 		struct aac_raw_io2 *writecmd2;
 		writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
 		memset(writecmd2, 0, sizeof(struct aac_raw_io2));
@@ -1435,6 +1510,52 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
 	return srbcmd;
 }
 
+static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
+						struct scsi_cmnd *cmd)
+{
+	struct aac_hba_cmd_req *hbacmd;
+	struct aac_dev *dev;
+	int bus, target;
+	u64 address;
+
+	dev = (struct aac_dev *)cmd->device->host->hostdata;
+
+	hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
+	memset(hbacmd, 0, 96);	/* sizeof(*hbacmd) is not necessary */
+	/* iu_type is a parameter of aac_hba_send */
+	switch (cmd->sc_data_direction) {
+	case DMA_TO_DEVICE:
+		hbacmd->byte1 = 2;
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		hbacmd->byte1 = 1;
+		break;
+	case DMA_NONE:
+	default:
+		break;
+	}
+	hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);
+
+	bus = aac_logical_to_phys(scmd_channel(cmd));
+	target = scmd_id(cmd);
+	hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;
+
+	/* we fill in reply_qid later in aac_src_deliver_message */
+	/* we fill in iu_type, request_id later in aac_hba_send */
+	/* we fill in emb_data_desc_count later in aac_build_sghba */
+
+	memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
+	hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));
+
+	address = (u64)fib->hw_error_pa;
+	hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+	hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
+	hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+
+	return hbacmd;
+}
+
 static void aac_srb_callback(void *context, struct fib * fibptr);
 
 static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
@@ -1505,11 +1626,243 @@ static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
 	return aac_scsi_32(fib, cmd);
 }
 
+static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
+{
+	struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
+	struct aac_dev *dev;
+	long ret;
+
+	dev = (struct aac_dev *)cmd->device->host->hostdata;
+
+	ret = aac_build_sghba(cmd, hbacmd,
+		dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 *	Now send the HBA command to the adapter
+	 */
+	fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
+		sizeof(struct aac_hba_sgl);
+
+	return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
+				(fib_callback) aac_hba_callback,
+				(void *) cmd);
+}
+
+int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
+{
+	struct fib *fibptr;
+	struct aac_srb *srbcmd;
+	struct sgmap64 *sg64;
+	struct aac_ciss_identify_pd *identify_resp;
+	dma_addr_t addr;
+	u32 vbus, vid;
+	u16 fibsize, datasize;
+	int rcode = -ENOMEM;
+
+
+	fibptr = aac_fib_alloc(dev);
+	if (!fibptr)
+		goto out;
+
+	fibsize = sizeof(struct aac_srb) -
+		sizeof(struct sgentry) + sizeof(struct sgentry64);
+	datasize = sizeof(struct aac_ciss_identify_pd);
+
+	identify_resp = pci_alloc_consistent(dev->pdev, datasize, &addr);
+
+	if (!identify_resp)
+		goto fib_free_ptr;
+
+	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
+	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);
+
+	aac_fib_init(fibptr);
+
+	srbcmd = (struct aac_srb *) fib_data(fibptr);
+	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+	srbcmd->channel = cpu_to_le32(vbus);
+	srbcmd->id = cpu_to_le32(vid);
+	srbcmd->lun = 0;
+	srbcmd->flags = cpu_to_le32(SRB_DataIn);
+	srbcmd->timeout = cpu_to_le32(10);
+	srbcmd->retry_limit = 0;
+	srbcmd->cdb_size = cpu_to_le32(12);
+	srbcmd->count = cpu_to_le32(datasize);
+
+	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+	srbcmd->cdb[0] = 0x26;
+	srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
+	srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+	sg64 = (struct sgmap64 *)&srbcmd->sg;
+	sg64->count = cpu_to_le32(1);
+	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
+	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+	sg64->sg[0].count = cpu_to_le32(datasize);
+
+	rcode = aac_fib_send(ScsiPortCommand64,
+		fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
+
+	if (identify_resp->current_queue_depth_limit <= 0 ||
+		identify_resp->current_queue_depth_limit > 32)
+		dev->hba_map[bus][target].qd_limit = 32;
+	else
+		dev->hba_map[bus][target].qd_limit =
+			identify_resp->current_queue_depth_limit;
+
+	pci_free_consistent(dev->pdev, datasize, (void *)identify_resp, addr);
+
+	aac_fib_complete(fibptr);
+
+fib_free_ptr:
+	aac_fib_free(fibptr);
+out:
+	return rcode;
+}
+
+/**
+ *	aac_update hba_map()-	update current hba map with data from FW
+ *	@dev:	aac_dev structure
+ *	@phys_luns: FW information from report phys luns
+ *
+ *	Update our hba map with the information gathered from the FW
+ */
+void aac_update_hba_map(struct aac_dev *dev,
+		struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
+{
+	/* ok and extended reporting */
+	u32 lun_count, nexus;
+	u32 i, bus, target;
+	u8 expose_flag, attribs;
+	u8 devtype;
+
+	lun_count = ((phys_luns->list_length[0] << 24)
+			+ (phys_luns->list_length[1] << 16)
+			+ (phys_luns->list_length[2] << 8)
+			+ (phys_luns->list_length[3])) / 24;
+
+	for (i = 0; i < lun_count; ++i) {
+
+		bus = phys_luns->lun[i].level2[1] & 0x3f;
+		target = phys_luns->lun[i].level2[0];
+		expose_flag = phys_luns->lun[i].bus >> 6;
+		attribs = phys_luns->lun[i].node_ident[9];
+		nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);
+
+		if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
+			continue;
+
+		dev->hba_map[bus][target].expose = expose_flag;
+
+		if (expose_flag != 0) {
+			devtype = AAC_DEVTYPE_RAID_MEMBER;
+			goto update_devtype;
+		}
+
+		if (nexus != 0 && (attribs & 8)) {
+			devtype = AAC_DEVTYPE_NATIVE_RAW;
+			dev->hba_map[bus][target].rmw_nexus =
+					nexus;
+		} else
+			devtype = AAC_DEVTYPE_ARC_RAW;
+
+		if (devtype != AAC_DEVTYPE_NATIVE_RAW)
+			goto update_devtype;
+
+		if (aac_issue_bmic_identify(dev, bus, target) < 0)
+			dev->hba_map[bus][target].qd_limit = 32;
+
+update_devtype:
+		if (rescan == AAC_INIT)
+			dev->hba_map[bus][target].devtype = devtype;
+		else
+			dev->hba_map[bus][target].new_devtype = devtype;
+	}
+}
+
+/**
+ *	aac_report_phys_luns()	Process topology change
+ *	@dev:		aac_dev structure
+ *	@fibptr:	fib pointer
+ *
+ *	Execute a CISS REPORT PHYS LUNS and process the results into
+ *	the current hba_map.
+ */
+int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
+{
+	int fibsize, datasize;
+	struct aac_ciss_phys_luns_resp *phys_luns;
+	struct aac_srb *srbcmd;
+	struct sgmap64 *sg64;
+	dma_addr_t addr;
+	u32 vbus, vid;
+	int rcode = 0;
+
+	/* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
+	fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
+			+ sizeof(struct sgentry64);
+	datasize = sizeof(struct aac_ciss_phys_luns_resp)
+			+ (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+
+	phys_luns = (struct aac_ciss_phys_luns_resp *) pci_alloc_consistent(
+			dev->pdev, datasize, &addr);
+
+	if (phys_luns == NULL) {
+		rcode = -ENOMEM;
+		goto err_out;
+	}
+
+	vbus = (u32) le16_to_cpu(
+			dev->supplement_adapter_info.VirtDeviceBus);
+	vid = (u32) le16_to_cpu(
+			dev->supplement_adapter_info.VirtDeviceTarget);
+
+	aac_fib_init(fibptr);
+
+	srbcmd = (struct aac_srb *) fib_data(fibptr);
+	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+	srbcmd->channel = cpu_to_le32(vbus);
+	srbcmd->id = cpu_to_le32(vid);
+	srbcmd->lun = 0;
+	srbcmd->flags = cpu_to_le32(SRB_DataIn);
+	srbcmd->timeout = cpu_to_le32(10);
+	srbcmd->retry_limit = 0;
+	srbcmd->cdb_size = cpu_to_le32(12);
+	srbcmd->count = cpu_to_le32(datasize);
+
+	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+	srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
+	srbcmd->cdb[1] = 2;	/* extended reporting */
+	srbcmd->cdb[8] = (u8)(datasize >> 8);
+	srbcmd->cdb[9] = (u8)(datasize);
+
+	sg64 = (struct sgmap64 *) &srbcmd->sg;
+	sg64->count = cpu_to_le32(1);
+	sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
+	sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
+	sg64->sg[0].count = cpu_to_le32(datasize);
+
+	rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
+			FsaNormal, 1, 1, NULL, NULL);
+
+	/* analyse data */
+	if (rcode >= 0 && phys_luns->resp_flag == 2) {
+		/* ok and extended reporting */
+		aac_update_hba_map(dev, phys_luns, rescan);
+	}
+
+	pci_free_consistent(dev->pdev, datasize, (void *) phys_luns, addr);
+err_out:
+	return rcode;
+}
+
 int aac_get_adapter_info(struct aac_dev* dev)
 {
 	struct fib* fibptr;
 	int rcode;
-	u32 tmp;
+	u32 tmp, bus, target;
 	struct aac_adapter_info *info;
 	struct aac_bus_info *command;
 	struct aac_bus_info_response *bus_info;
@@ -1540,6 +1893,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
 	}
 	memcpy(&dev->adapter_info, info, sizeof(*info));
 
+	dev->supplement_adapter_info.VirtDeviceBus = 0xffff;
 	if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
 		struct aac_supplement_adapter_info * sinfo;
 
@@ -1567,6 +1921,13 @@ int aac_get_adapter_info(struct aac_dev* dev)
 
 	}
 
+	/* reset all previous mapped devices (i.e. for init. after IOP_RESET) */
+	for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
+		for (target = 0; target < AAC_MAX_TARGETS; target++) {
+			dev->hba_map[bus][target].devtype = 0;
+			dev->hba_map[bus][target].qd_limit = 0;
+		}
+	}
 
 	/*
 	 *	GetBusInfo
@@ -1599,6 +1960,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
 		dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
 	}
 
+	if (!dev->sync_mode && dev->sa_firmware &&
+		dev->supplement_adapter_info.VirtDeviceBus != 0xffff) {
+		/* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
+		rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
+	}
+
 	if (!dev->in_reset) {
 		char buffer[16];
 		tmp = le32_to_cpu(dev->adapter_info.kernelrev);
@@ -1765,6 +2132,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
 			(dev->scsi_host_ptr->sg_tablesize * 8) + 112;
 		}
 	}
+	if (!dev->sync_mode && dev->sa_firmware &&
+		dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE)
+		dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize =
+			HBA_MAX_SG_SEPARATE;
+
 	/* FIB should be freed only after getting the response from the F/W */
 	if (rcode != -ERESTARTSYS) {
 		aac_fib_complete(fibptr);
@@ -1845,6 +2217,15 @@ static void io_callback(void *context, struct fib * fibptr)
 			min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
 			     SCSI_SENSE_BUFFERSIZE));
 		break;
+	case ST_MEDERR:
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
+		  SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
+		break;
 	default:
 #ifdef AAC_DETAILED_STATUS_INFO
 		printk(KERN_WARNING "io_callback: io failed, status = %d\n",
@@ -2312,7 +2693,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
 
 int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 {
-	u32 cid;
+	u32 cid, bus;
 	struct Scsi_Host *host = scsicmd->device->host;
 	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
 	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
@@ -2330,8 +2711,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 	if((cid >= dev->maximum_num_containers) ||
 			(scsicmd->device->lun != 0)) {
 		scsicmd->result = DID_NO_CONNECT << 16;
-		scsicmd->scsi_done(scsicmd);
-		return 0;
+		goto scsi_done_ret;
 	}
 
 	/*
@@ -2359,15 +2739,30 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2359 } 2739 }
2360 } 2740 }
2361 } else { /* check for physical non-dasd devices */ 2741 } else { /* check for physical non-dasd devices */
2362 if (dev->nondasd_support || expose_physicals || 2742 bus = aac_logical_to_phys(scmd_channel(scsicmd));
2363 dev->jbod) { 2743 if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
2744 (dev->hba_map[bus][cid].expose
 2745 == AAC_HIDE_DISK)) {
2746 if (scsicmd->cmnd[0] == INQUIRY) {
2747 scsicmd->result = DID_NO_CONNECT << 16;
2748 goto scsi_done_ret;
2749 }
2750 }
2751
2752 if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
2753 dev->hba_map[bus][cid].devtype
2754 == AAC_DEVTYPE_NATIVE_RAW) {
2755 if (dev->in_reset)
2756 return -1;
2757 return aac_send_hba_fib(scsicmd);
2758 } else if (dev->nondasd_support || expose_physicals ||
2759 dev->jbod) {
2364 if (dev->in_reset) 2760 if (dev->in_reset)
2365 return -1; 2761 return -1;
2366 return aac_send_srb_fib(scsicmd); 2762 return aac_send_srb_fib(scsicmd);
2367 } else { 2763 } else {
2368 scsicmd->result = DID_NO_CONNECT << 16; 2764 scsicmd->result = DID_NO_CONNECT << 16;
2369 scsicmd->scsi_done(scsicmd); 2765 goto scsi_done_ret;
2370 return 0;
2371 } 2766 }
2372 } 2767 }
2373 } 2768 }
@@ -2385,13 +2780,34 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2385 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2780 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2386 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 2781 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
2387 SCSI_SENSE_BUFFERSIZE)); 2782 SCSI_SENSE_BUFFERSIZE));
2388 scsicmd->scsi_done(scsicmd); 2783 goto scsi_done_ret;
2389 return 0;
2390 } 2784 }
2391 2785
2392
2393 /* Handle commands here that don't really require going out to the adapter */
2394 switch (scsicmd->cmnd[0]) { 2786 switch (scsicmd->cmnd[0]) {
2787 case READ_6:
2788 case READ_10:
2789 case READ_12:
2790 case READ_16:
2791 if (dev->in_reset)
2792 return -1;
2793 return aac_read(scsicmd);
2794
2795 case WRITE_6:
2796 case WRITE_10:
2797 case WRITE_12:
2798 case WRITE_16:
2799 if (dev->in_reset)
2800 return -1;
2801 return aac_write(scsicmd);
2802
2803 case SYNCHRONIZE_CACHE:
2804 if (((aac_cache & 6) == 6) && dev->cache_protected) {
2805 scsicmd->result = AAC_STAT_GOOD;
2806 break;
2807 }
 2808 /* Issue FIB to tell Firmware to flush its cache */
2809 if ((aac_cache & 6) != 2)
2810 return aac_synchronize(scsicmd);
2395 case INQUIRY: 2811 case INQUIRY:
2396 { 2812 {
2397 struct inquiry_data inq_data; 2813 struct inquiry_data inq_data;
@@ -2414,8 +2830,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2414 arr[1] = scsicmd->cmnd[2]; 2830 arr[1] = scsicmd->cmnd[2];
2415 scsi_sg_copy_from_buffer(scsicmd, &inq_data, 2831 scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2416 sizeof(inq_data)); 2832 sizeof(inq_data));
2417 scsicmd->result = DID_OK << 16 | 2833 scsicmd->result = AAC_STAT_GOOD;
2418 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2419 } else if (scsicmd->cmnd[2] == 0x80) { 2834 } else if (scsicmd->cmnd[2] == 0x80) {
2420 /* unit serial number page */ 2835 /* unit serial number page */
2421 arr[3] = setinqserial(dev, &arr[4], 2836 arr[3] = setinqserial(dev, &arr[4],
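[The repeated DID_OK/COMMAND_COMPLETE/SAM_STAT_GOOD status words are collapsed into AAC_STAT_GOOD throughout this function. The macro's definition sits outside these hunks; judging from the one-for-one substitutions it is presumably:

    #define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)
]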
@@ -2426,8 +2841,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2426 if (aac_wwn != 2) 2841 if (aac_wwn != 2)
2427 return aac_get_container_serial( 2842 return aac_get_container_serial(
2428 scsicmd); 2843 scsicmd);
2429 scsicmd->result = DID_OK << 16 | 2844 scsicmd->result = AAC_STAT_GOOD;
2430 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2431 } else if (scsicmd->cmnd[2] == 0x83) { 2845 } else if (scsicmd->cmnd[2] == 0x83) {
2432 /* vpd page 0x83 - Device Identification Page */ 2846 /* vpd page 0x83 - Device Identification Page */
2433 char *sno = (char *)&inq_data; 2847 char *sno = (char *)&inq_data;
@@ -2436,8 +2850,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2436 if (aac_wwn != 2) 2850 if (aac_wwn != 2)
2437 return aac_get_container_serial( 2851 return aac_get_container_serial(
2438 scsicmd); 2852 scsicmd);
2439 scsicmd->result = DID_OK << 16 | 2853 scsicmd->result = AAC_STAT_GOOD;
2440 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2441 } else { 2854 } else {
2442 /* vpd page not implemented */ 2855 /* vpd page not implemented */
2443 scsicmd->result = DID_OK << 16 | 2856 scsicmd->result = DID_OK << 16 |
@@ -2452,8 +2865,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2452 sizeof(dev->fsa_dev[cid].sense_data), 2865 sizeof(dev->fsa_dev[cid].sense_data),
2453 SCSI_SENSE_BUFFERSIZE)); 2866 SCSI_SENSE_BUFFERSIZE));
2454 } 2867 }
2455 scsicmd->scsi_done(scsicmd); 2868 break;
2456 return 0;
2457 } 2869 }
2458 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ 2870 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
2459 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ 2871 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
@@ -2469,9 +2881,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2469 inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ 2881 inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
2470 scsi_sg_copy_from_buffer(scsicmd, &inq_data, 2882 scsi_sg_copy_from_buffer(scsicmd, &inq_data,
2471 sizeof(inq_data)); 2883 sizeof(inq_data));
2472 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 2884 scsicmd->result = AAC_STAT_GOOD;
2473 scsicmd->scsi_done(scsicmd); 2885 break;
2474 return 0;
2475 } 2886 }
2476 if (dev->in_reset) 2887 if (dev->in_reset)
2477 return -1; 2888 return -1;
@@ -2519,10 +2930,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2519 /* Do not cache partition table for arrays */ 2930 /* Do not cache partition table for arrays */
2520 scsicmd->device->removable = 1; 2931 scsicmd->device->removable = 1;
2521 2932
2522 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 2933 scsicmd->result = AAC_STAT_GOOD;
2523 scsicmd->scsi_done(scsicmd); 2934 break;
2524
2525 return 0;
2526 } 2935 }
2527 2936
2528 case READ_CAPACITY: 2937 case READ_CAPACITY:
@@ -2547,11 +2956,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2547 scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); 2956 scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
2548 /* Do not cache partition table for arrays */ 2957 /* Do not cache partition table for arrays */
2549 scsicmd->device->removable = 1; 2958 scsicmd->device->removable = 1;
2550 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 2959 scsicmd->result = AAC_STAT_GOOD;
2551 SAM_STAT_GOOD; 2960 break;
2552 scsicmd->scsi_done(scsicmd);
2553
2554 return 0;
2555 } 2961 }
2556 2962
2557 case MODE_SENSE: 2963 case MODE_SENSE:
@@ -2629,10 +3035,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2629 scsi_sg_copy_from_buffer(scsicmd, 3035 scsi_sg_copy_from_buffer(scsicmd,
2630 (char *)&mpd, 3036 (char *)&mpd,
2631 mode_buf_length); 3037 mode_buf_length);
2632 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 3038 scsicmd->result = AAC_STAT_GOOD;
2633 scsicmd->scsi_done(scsicmd); 3039 break;
2634
2635 return 0;
2636 } 3040 }
2637 case MODE_SENSE_10: 3041 case MODE_SENSE_10:
2638 { 3042 {
@@ -2708,18 +3112,17 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2708 (char *)&mpd10, 3112 (char *)&mpd10,
2709 mode_buf_length); 3113 mode_buf_length);
2710 3114
2711 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 3115 scsicmd->result = AAC_STAT_GOOD;
2712 scsicmd->scsi_done(scsicmd); 3116 break;
2713
2714 return 0;
2715 } 3117 }
2716 case REQUEST_SENSE: 3118 case REQUEST_SENSE:
2717 dprintk((KERN_DEBUG "REQUEST SENSE command.\n")); 3119 dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
2718 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data)); 3120 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2719 memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data)); 3121 sizeof(struct sense_data));
2720 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 3122 memset(&dev->fsa_dev[cid].sense_data, 0,
2721 scsicmd->scsi_done(scsicmd); 3123 sizeof(struct sense_data));
2722 return 0; 3124 scsicmd->result = AAC_STAT_GOOD;
3125 break;
2723 3126
2724 case ALLOW_MEDIUM_REMOVAL: 3127 case ALLOW_MEDIUM_REMOVAL:
2725 dprintk((KERN_DEBUG "LOCK command.\n")); 3128 dprintk((KERN_DEBUG "LOCK command.\n"));
@@ -2728,9 +3131,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2728 else 3131 else
2729 fsa_dev_ptr[cid].locked = 0; 3132 fsa_dev_ptr[cid].locked = 0;
2730 3133
2731 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 3134 scsicmd->result = AAC_STAT_GOOD;
2732 scsicmd->scsi_done(scsicmd); 3135 break;
2733 return 0;
2734 /* 3136 /*
2735 * These commands are all No-Ops 3137 * These commands are all No-Ops
2736 */ 3138 */
@@ -2746,80 +3148,41 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2746 min_t(size_t, 3148 min_t(size_t,
2747 sizeof(dev->fsa_dev[cid].sense_data), 3149 sizeof(dev->fsa_dev[cid].sense_data),
2748 SCSI_SENSE_BUFFERSIZE)); 3150 SCSI_SENSE_BUFFERSIZE));
2749 scsicmd->scsi_done(scsicmd); 3151 break;
2750 return 0;
2751 } 3152 }
2752 /* FALLTHRU */
2753 case RESERVE: 3153 case RESERVE:
2754 case RELEASE: 3154 case RELEASE:
2755 case REZERO_UNIT: 3155 case REZERO_UNIT:
2756 case REASSIGN_BLOCKS: 3156 case REASSIGN_BLOCKS:
2757 case SEEK_10: 3157 case SEEK_10:
2758 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 3158 scsicmd->result = AAC_STAT_GOOD;
2759 scsicmd->scsi_done(scsicmd); 3159 break;
2760 return 0;
2761 3160
2762 case START_STOP: 3161 case START_STOP:
2763 return aac_start_stop(scsicmd); 3162 return aac_start_stop(scsicmd);
2764 }
2765
2766 switch (scsicmd->cmnd[0])
2767 {
2768 case READ_6:
2769 case READ_10:
2770 case READ_12:
2771 case READ_16:
2772 if (dev->in_reset)
2773 return -1;
2774 /*
2775 * Hack to keep track of ordinal number of the device that
2776 * corresponds to a container. Needed to convert
2777 * containers to /dev/sd device names
2778 */
2779
2780 if (scsicmd->request->rq_disk)
2781 strlcpy(fsa_dev_ptr[cid].devname,
2782 scsicmd->request->rq_disk->disk_name,
2783 min(sizeof(fsa_dev_ptr[cid].devname),
2784 sizeof(scsicmd->request->rq_disk->disk_name) + 1));
2785
2786 return aac_read(scsicmd);
2787 3163
2788 case WRITE_6: 3164 /* FALLTHRU */
2789 case WRITE_10: 3165 default:
2790 case WRITE_12: 3166 /*
2791 case WRITE_16: 3167 * Unhandled commands
2792 if (dev->in_reset) 3168 */
2793 return -1; 3169 dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
2794 return aac_write(scsicmd); 3170 scsicmd->cmnd[0]));
2795 3171 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
2796 case SYNCHRONIZE_CACHE: 3172 SAM_STAT_CHECK_CONDITION;
2797 if (((aac_cache & 6) == 6) && dev->cache_protected) { 3173 set_sense(&dev->fsa_dev[cid].sense_data,
2798 scsicmd->result = DID_OK << 16 |
2799 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
2800 scsicmd->scsi_done(scsicmd);
2801 return 0;
2802 }
2803 /* Issue FIB to tell Firmware to flush it's cache */
2804 if ((aac_cache & 6) != 2)
2805 return aac_synchronize(scsicmd);
2806 /* FALLTHRU */
2807 default:
2808 /*
2809 * Unhandled commands
2810 */
2811 dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
2812 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2813 set_sense(&dev->fsa_dev[cid].sense_data,
2814 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, 3174 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
2815 ASENCODE_INVALID_COMMAND, 0, 0); 3175 ASENCODE_INVALID_COMMAND, 0, 0);
2816 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 3176 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2817 min_t(size_t, 3177 min_t(size_t,
2818 sizeof(dev->fsa_dev[cid].sense_data), 3178 sizeof(dev->fsa_dev[cid].sense_data),
2819 SCSI_SENSE_BUFFERSIZE)); 3179 SCSI_SENSE_BUFFERSIZE));
2820 scsicmd->scsi_done(scsicmd);
2821 return 0;
2822 } 3180 }
3181
3182scsi_done_ret:
3183
3184 scsicmd->scsi_done(scsicmd);
3185 return 0;
2823} 3186}
2824 3187
2825static int query_disk(struct aac_dev *dev, void __user *arg) 3188static int query_disk(struct aac_dev *dev, void __user *arg)
@@ -2954,16 +3317,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2954 return; 3317 return;
2955 3318
2956 BUG_ON(fibptr == NULL); 3319 BUG_ON(fibptr == NULL);
2957 dev = fibptr->dev;
2958 3320
2959 scsi_dma_unmap(scsicmd); 3321 dev = fibptr->dev;
2960
2961 /* expose physical device if expose_physicald flag is on */
2962 if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
2963 && expose_physicals > 0)
2964 aac_expose_phy_device(scsicmd);
2965 3322
2966 srbreply = (struct aac_srb_reply *) fib_data(fibptr); 3323 srbreply = (struct aac_srb_reply *) fib_data(fibptr);
3324
2967 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */ 3325 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
2968 3326
2969 if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { 3327 if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
@@ -2976,158 +3334,176 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2976 */ 3334 */
2977 scsi_set_resid(scsicmd, scsi_bufflen(scsicmd) 3335 scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
2978 - le32_to_cpu(srbreply->data_xfer_length)); 3336 - le32_to_cpu(srbreply->data_xfer_length));
2979 /* 3337 }
2980 * First check the fib status
2981 */
2982 3338
2983 if (le32_to_cpu(srbreply->status) != ST_OK) {
2984 int len;
2985 3339
2986 printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status)); 3340 scsi_dma_unmap(scsicmd);
2987 len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
2988 SCSI_SENSE_BUFFERSIZE);
2989 scsicmd->result = DID_ERROR << 16
2990 | COMMAND_COMPLETE << 8
2991 | SAM_STAT_CHECK_CONDITION;
2992 memcpy(scsicmd->sense_buffer,
2993 srbreply->sense_data, len);
2994 }
2995 3341
2996 /* 3342 /* expose physical device if expose_physicals flag is on */
2997 * Next check the srb status 3343 if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
2998 */ 3344 && expose_physicals > 0)
2999 switch ((le32_to_cpu(srbreply->srb_status))&0x3f) { 3345 aac_expose_phy_device(scsicmd);
3000 case SRB_STATUS_ERROR_RECOVERY: 3346
3001 case SRB_STATUS_PENDING: 3347 /*
3002 case SRB_STATUS_SUCCESS: 3348 * First check the fib status
3003 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 3349 */
3004 break; 3350
3005 case SRB_STATUS_DATA_OVERRUN: 3351 if (le32_to_cpu(srbreply->status) != ST_OK) {
3006 switch (scsicmd->cmnd[0]) { 3352 int len;
3007 case READ_6: 3353
3008 case WRITE_6: 3354 pr_warn("aac_srb_callback: srb failed, status = %d\n",
3009 case READ_10: 3355 le32_to_cpu(srbreply->status));
3010 case WRITE_10: 3356 len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3011 case READ_12: 3357 SCSI_SENSE_BUFFERSIZE);
3012 case WRITE_12: 3358 scsicmd->result = DID_ERROR << 16
3013 case READ_16: 3359 | COMMAND_COMPLETE << 8
3014 case WRITE_16: 3360 | SAM_STAT_CHECK_CONDITION;
3015 if (le32_to_cpu(srbreply->data_xfer_length) 3361 memcpy(scsicmd->sense_buffer,
3016 < scsicmd->underflow) 3362 srbreply->sense_data, len);
3017 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n"); 3363 }
3018 else 3364
3019 printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n"); 3365 /*
3020 scsicmd->result = DID_ERROR << 16 3366 * Next check the srb status
3021 | COMMAND_COMPLETE << 8; 3367 */
3022 break; 3368 switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
3023 case INQUIRY: { 3369 case SRB_STATUS_ERROR_RECOVERY:
3024 scsicmd->result = DID_OK << 16 3370 case SRB_STATUS_PENDING:
3025 | COMMAND_COMPLETE << 8; 3371 case SRB_STATUS_SUCCESS:
3026 break; 3372 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3027 } 3373 break;
3028 default: 3374 case SRB_STATUS_DATA_OVERRUN:
3029 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 3375 switch (scsicmd->cmnd[0]) {
3030 break; 3376 case READ_6:
3031 } 3377 case WRITE_6:
3032 break; 3378 case READ_10:
3033 case SRB_STATUS_ABORTED: 3379 case WRITE_10:
3034 scsicmd->result = DID_ABORT << 16 | ABORT << 8; 3380 case READ_12:
3035 break; 3381 case WRITE_12:
3036 case SRB_STATUS_ABORT_FAILED: 3382 case READ_16:
3037 /* 3383 case WRITE_16:
3038 * Not sure about this one - but assuming the 3384 if (le32_to_cpu(srbreply->data_xfer_length)
3039 * hba was trying to abort for some reason 3385 < scsicmd->underflow)
3040 */ 3386 pr_warn("aacraid: SCSI CMD underflow\n");
3041 scsicmd->result = DID_ERROR << 16 | ABORT << 8; 3387 else
3388 pr_warn("aacraid: SCSI CMD Data Overrun\n");
3389 scsicmd->result = DID_ERROR << 16
3390 | COMMAND_COMPLETE << 8;
3042 break; 3391 break;
3043 case SRB_STATUS_PARITY_ERROR: 3392 case INQUIRY:
3044 scsicmd->result = DID_PARITY << 16 3393 scsicmd->result = DID_OK << 16
3045 | MSG_PARITY_ERROR << 8; 3394 | COMMAND_COMPLETE << 8;
3046 break; 3395 break;
3047 case SRB_STATUS_NO_DEVICE: 3396 default:
3048 case SRB_STATUS_INVALID_PATH_ID: 3397 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3049 case SRB_STATUS_INVALID_TARGET_ID:
3050 case SRB_STATUS_INVALID_LUN:
3051 case SRB_STATUS_SELECTION_TIMEOUT:
3052 scsicmd->result = DID_NO_CONNECT << 16
3053 | COMMAND_COMPLETE << 8;
3054 break; 3398 break;
3399 }
3400 break;
3401 case SRB_STATUS_ABORTED:
3402 scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3403 break;
3404 case SRB_STATUS_ABORT_FAILED:
3405 /*
3406 * Not sure about this one - but assuming the
3407 * hba was trying to abort for some reason
3408 */
3409 scsicmd->result = DID_ERROR << 16 | ABORT << 8;
3410 break;
3411 case SRB_STATUS_PARITY_ERROR:
3412 scsicmd->result = DID_PARITY << 16
3413 | MSG_PARITY_ERROR << 8;
3414 break;
3415 case SRB_STATUS_NO_DEVICE:
3416 case SRB_STATUS_INVALID_PATH_ID:
3417 case SRB_STATUS_INVALID_TARGET_ID:
3418 case SRB_STATUS_INVALID_LUN:
3419 case SRB_STATUS_SELECTION_TIMEOUT:
3420 scsicmd->result = DID_NO_CONNECT << 16
3421 | COMMAND_COMPLETE << 8;
3422 break;
3055 3423
3056 case SRB_STATUS_COMMAND_TIMEOUT: 3424 case SRB_STATUS_COMMAND_TIMEOUT:
3057 case SRB_STATUS_TIMEOUT: 3425 case SRB_STATUS_TIMEOUT:
3058 scsicmd->result = DID_TIME_OUT << 16 3426 scsicmd->result = DID_TIME_OUT << 16
3059 | COMMAND_COMPLETE << 8; 3427 | COMMAND_COMPLETE << 8;
3060 break; 3428 break;
3061 3429
3062 case SRB_STATUS_BUSY: 3430 case SRB_STATUS_BUSY:
3063 scsicmd->result = DID_BUS_BUSY << 16 3431 scsicmd->result = DID_BUS_BUSY << 16
3064 | COMMAND_COMPLETE << 8; 3432 | COMMAND_COMPLETE << 8;
3065 break; 3433 break;
3066 3434
3067 case SRB_STATUS_BUS_RESET: 3435 case SRB_STATUS_BUS_RESET:
3068 scsicmd->result = DID_RESET << 16 3436 scsicmd->result = DID_RESET << 16
3069 | COMMAND_COMPLETE << 8; 3437 | COMMAND_COMPLETE << 8;
3070 break; 3438 break;
3071 3439
3072 case SRB_STATUS_MESSAGE_REJECTED: 3440 case SRB_STATUS_MESSAGE_REJECTED:
3073 scsicmd->result = DID_ERROR << 16 3441 scsicmd->result = DID_ERROR << 16
3074 | MESSAGE_REJECT << 8; 3442 | MESSAGE_REJECT << 8;
3075 break; 3443 break;
3076 case SRB_STATUS_REQUEST_FLUSHED: 3444 case SRB_STATUS_REQUEST_FLUSHED:
3077 case SRB_STATUS_ERROR: 3445 case SRB_STATUS_ERROR:
3078 case SRB_STATUS_INVALID_REQUEST: 3446 case SRB_STATUS_INVALID_REQUEST:
3079 case SRB_STATUS_REQUEST_SENSE_FAILED: 3447 case SRB_STATUS_REQUEST_SENSE_FAILED:
3080 case SRB_STATUS_NO_HBA: 3448 case SRB_STATUS_NO_HBA:
3081 case SRB_STATUS_UNEXPECTED_BUS_FREE: 3449 case SRB_STATUS_UNEXPECTED_BUS_FREE:
3082 case SRB_STATUS_PHASE_SEQUENCE_FAILURE: 3450 case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
3083 case SRB_STATUS_BAD_SRB_BLOCK_LENGTH: 3451 case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
3084 case SRB_STATUS_DELAYED_RETRY: 3452 case SRB_STATUS_DELAYED_RETRY:
3085 case SRB_STATUS_BAD_FUNCTION: 3453 case SRB_STATUS_BAD_FUNCTION:
3086 case SRB_STATUS_NOT_STARTED: 3454 case SRB_STATUS_NOT_STARTED:
3087 case SRB_STATUS_NOT_IN_USE: 3455 case SRB_STATUS_NOT_IN_USE:
3088 case SRB_STATUS_FORCE_ABORT: 3456 case SRB_STATUS_FORCE_ABORT:
3089 case SRB_STATUS_DOMAIN_VALIDATION_FAIL: 3457 case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
3090 default: 3458 default:
3091#ifdef AAC_DETAILED_STATUS_INFO 3459#ifdef AAC_DETAILED_STATUS_INFO
3092 printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n", 3460 pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
3093 le32_to_cpu(srbreply->srb_status) & 0x3F, 3461 le32_to_cpu(srbreply->srb_status) & 0x3F,
3094 aac_get_status_string( 3462 aac_get_status_string(
3095 le32_to_cpu(srbreply->srb_status) & 0x3F), 3463 le32_to_cpu(srbreply->srb_status) & 0x3F),
3096 scsicmd->cmnd[0], 3464 scsicmd->cmnd[0],
3097 le32_to_cpu(srbreply->scsi_status)); 3465 le32_to_cpu(srbreply->scsi_status));
3098#endif 3466#endif
3099 if ((scsicmd->cmnd[0] == ATA_12) 3467 /*
3100 || (scsicmd->cmnd[0] == ATA_16)) { 3468 * When the CC bit is SET by the host in ATA pass thru CDB,
3101 if (scsicmd->cmnd[2] & (0x01 << 5)) { 3469 * driver is supposed to return DID_OK
3102 scsicmd->result = DID_OK << 16 3470 *
3103 | COMMAND_COMPLETE << 8; 3471 * When the CC bit is RESET by the host, driver should
3104 break; 3472 * return DID_ERROR
3105 } else { 3473 */
3106 scsicmd->result = DID_ERROR << 16 3474 if ((scsicmd->cmnd[0] == ATA_12)
3107 | COMMAND_COMPLETE << 8; 3475 || (scsicmd->cmnd[0] == ATA_16)) {
3108 break; 3476
3109 } 3477 if (scsicmd->cmnd[2] & (0x01 << 5)) {
3478 scsicmd->result = DID_OK << 16
3479 | COMMAND_COMPLETE << 8;
3480 break;
3110 } else { 3481 } else {
3111 scsicmd->result = DID_ERROR << 16 3482 scsicmd->result = DID_ERROR << 16
3112 | COMMAND_COMPLETE << 8; 3483 | COMMAND_COMPLETE << 8;
3113 break; 3484 break;
3114 } 3485 }
3486 } else {
3487 scsicmd->result = DID_ERROR << 16
3488 | COMMAND_COMPLETE << 8;
3489 break;
3115 } 3490 }
3116 if (le32_to_cpu(srbreply->scsi_status) 3491 }
3117 == SAM_STAT_CHECK_CONDITION) { 3492 if (le32_to_cpu(srbreply->scsi_status)
3118 int len; 3493 == SAM_STAT_CHECK_CONDITION) {
3494 int len;
3119 3495
3120 scsicmd->result |= SAM_STAT_CHECK_CONDITION; 3496 scsicmd->result |= SAM_STAT_CHECK_CONDITION;
3121 len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), 3497 len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
3122 SCSI_SENSE_BUFFERSIZE); 3498 SCSI_SENSE_BUFFERSIZE);
3123#ifdef AAC_DETAILED_STATUS_INFO 3499#ifdef AAC_DETAILED_STATUS_INFO
3124 printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", 3500 pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
3125 le32_to_cpu(srbreply->status), len); 3501 le32_to_cpu(srbreply->status), len);
3126#endif 3502#endif
3127 memcpy(scsicmd->sense_buffer, 3503 memcpy(scsicmd->sense_buffer,
3128 srbreply->sense_data, len); 3504 srbreply->sense_data, len);
3129 }
3130 } 3505 }
3506
3131 /* 3507 /*
3132 * OR in the scsi status (already shifted up a bit) 3508 * OR in the scsi status (already shifted up a bit)
3133 */ 3509 */
@@ -3137,9 +3513,152 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
3137 scsicmd->scsi_done(scsicmd); 3513 scsicmd->scsi_done(scsicmd);
3138} 3514}
3139 3515
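[The ATA_12/ATA_16 branch in the default arm above tests bit 5 of CDB byte 2 — the CK_COND flag of the SAT ATA PASS-THROUGH commands. With CK_COND set the host asked for task-register/sense data even on success, so the driver completes with DID_OK; with it clear a failed pass-through is reported as DID_ERROR. The test in isolation:

    static inline bool ata_passthru_ck_cond(const struct scsi_cmnd *cmd)
    {
            return (cmd->cmnd[0] == ATA_12 || cmd->cmnd[0] == ATA_16) &&
                   (cmd->cmnd[2] & (1 << 5));
    }
]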
3516static void hba_resp_task_complete(struct aac_dev *dev,
3517 struct scsi_cmnd *scsicmd,
 3518 struct aac_hba_resp *err)
 3519 {
3520 scsicmd->result = err->status;
3521 /* set residual count */
3522 scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count));
3523
3524 switch (err->status) {
3525 case SAM_STAT_GOOD:
3526 scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3527 break;
3528 case SAM_STAT_CHECK_CONDITION:
3529 {
3530 int len;
3531
3532 len = min_t(u8, err->sense_response_data_len,
3533 SCSI_SENSE_BUFFERSIZE);
3534 if (len)
3535 memcpy(scsicmd->sense_buffer,
3536 err->sense_response_buf, len);
3537 scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
3538 break;
3539 }
3540 case SAM_STAT_BUSY:
3541 scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
3542 break;
3543 case SAM_STAT_TASK_ABORTED:
3544 scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
3545 break;
3546 case SAM_STAT_RESERVATION_CONFLICT:
3547 case SAM_STAT_TASK_SET_FULL:
3548 default:
3549 scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3550 break;
3551 }
3552}
3553
3554static void hba_resp_task_failure(struct aac_dev *dev,
3555 struct scsi_cmnd *scsicmd,
3556 struct aac_hba_resp *err)
3557{
3558 switch (err->status) {
3559 case HBA_RESP_STAT_HBAMODE_DISABLED:
3560 {
3561 u32 bus, cid;
3562
3563 bus = aac_logical_to_phys(scmd_channel(scsicmd));
3564 cid = scmd_id(scsicmd);
3565 if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
3566 dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
3567 dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
3568 }
3569 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3570 break;
3571 }
3572 case HBA_RESP_STAT_IO_ERROR:
3573 case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
3574 scsicmd->result = DID_OK << 16 |
3575 COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
3576 break;
3577 case HBA_RESP_STAT_IO_ABORTED:
3578 scsicmd->result = DID_ABORT << 16 | ABORT << 8;
3579 break;
3580 case HBA_RESP_STAT_INVALID_DEVICE:
3581 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3582 break;
3583 case HBA_RESP_STAT_UNDERRUN:
3584 /* UNDERRUN is OK */
3585 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3586 break;
3587 case HBA_RESP_STAT_OVERRUN:
3588 default:
3589 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3590 break;
3591 }
3592}
3593
3594/**
3595 *
3596 * aac_hba_callback
3597 * @context: the context set in the fib - here it is scsi cmd
3598 * @fibptr: pointer to the fib
3599 *
3600 * Handles the completion of a native HBA scsi command
3601 *
3602 */
3603void aac_hba_callback(void *context, struct fib *fibptr)
3604{
3605 struct aac_dev *dev;
3606 struct scsi_cmnd *scsicmd;
3607
3608 struct aac_hba_resp *err =
3609 &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
3610
3611 scsicmd = (struct scsi_cmnd *) context;
3612
3613 if (!aac_valid_context(scsicmd, fibptr))
3614 return;
3615
3616 WARN_ON(fibptr == NULL);
3617 dev = fibptr->dev;
3618
3619 if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF))
3620 scsi_dma_unmap(scsicmd);
3621
3622 if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
3623 /* fast response */
3624 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3625 goto out;
3626 }
3627
3628 switch (err->service_response) {
3629 case HBA_RESP_SVCRES_TASK_COMPLETE:
3630 hba_resp_task_complete(dev, scsicmd, err);
3631 break;
3632 case HBA_RESP_SVCRES_FAILURE:
3633 hba_resp_task_failure(dev, scsicmd, err);
3634 break;
3635 case HBA_RESP_SVCRES_TMF_REJECTED:
3636 scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
3637 break;
3638 case HBA_RESP_SVCRES_TMF_LUN_INVALID:
3639 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
3640 break;
3641 case HBA_RESP_SVCRES_TMF_COMPLETE:
3642 case HBA_RESP_SVCRES_TMF_SUCCEEDED:
3643 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
3644 break;
3645 default:
3646 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
3647 break;
3648 }
3649
3650out:
3651 aac_fib_complete(fibptr);
3652
3653 if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
3654 scsicmd->SCp.sent_command = 1;
3655 else
3656 scsicmd->scsi_done(scsicmd);
3657}
3658
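[Note the completion split at the end of aac_hba_callback(): a TMF fib does not own the scsi command, so instead of calling scsi_done() the callback raises SCp.sent_command for whoever issued the TMF. A hypothetical waiter on the issuing side — the polling loop is illustrative, not taken from this patch:

    static int tmf_wait(struct scsi_cmnd *cmd, unsigned long deadline)
    {
            while (!cmd->SCp.sent_command) {    /* set by aac_hba_callback */
                    if (time_after(jiffies, deadline))
                            return -ETIMEDOUT;
                    msleep(20);
            }
            return 0;
    }
]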
3140/** 3659/**
3141 * 3660 *
3142 * aac_send_scb_fib 3661 * aac_send_srb_fib
3143 * @scsicmd: the scsi command block 3662 * @scsicmd: the scsi command block
3144 * 3663 *
3145 * This routine will form a FIB and fill in the aac_srb from the 3664 * This routine will form a FIB and fill in the aac_srb from the
@@ -3182,6 +3701,54 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
3182 return -1; 3701 return -1;
3183} 3702}
3184 3703
3704/**
3705 *
3706 * aac_send_hba_fib
3707 * @scsicmd: the scsi command block
3708 *
3709 * This routine will form a FIB and fill in the aac_hba_cmd_req from the
3710 * scsicmd passed in.
3711 */
3712static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
3713{
3714 struct fib *cmd_fibcontext;
3715 struct aac_dev *dev;
3716 int status;
3717
3718 dev = shost_priv(scsicmd->device->host);
3719 if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
3720 scsicmd->device->lun > AAC_MAX_LUN - 1) {
3721 scsicmd->result = DID_NO_CONNECT << 16;
3722 scsicmd->scsi_done(scsicmd);
3723 return 0;
3724 }
3725
3726 /*
 3727 * Allocate and initialize a Fib, then set up the native HBA command
3728 */
3729 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
3730 if (!cmd_fibcontext)
3731 return -1;
3732
3733 status = aac_adapter_hba(cmd_fibcontext, scsicmd);
3734
3735 /*
 3736 * Check that the command was queued to the controller
3737 */
3738 if (status == -EINPROGRESS) {
3739 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
3740 return 0;
3741 }
3742
3743 pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
3744 status);
3745 aac_fib_complete(cmd_fibcontext);
3746 aac_fib_free(cmd_fibcontext);
3747
3748 return -1;
3749}
3750
3751
3185static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg) 3752static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
3186{ 3753{
3187 struct aac_dev *dev; 3754 struct aac_dev *dev;
@@ -3434,6 +4001,75 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int
3434 return 0; 4001 return 0;
3435} 4002}
3436 4003
4004static long aac_build_sghba(struct scsi_cmnd *scsicmd,
4005 struct aac_hba_cmd_req *hbacmd,
4006 int sg_max,
4007 u64 sg_address)
4008{
4009 unsigned long byte_count = 0;
4010 int nseg;
4011 struct scatterlist *sg;
4012 int i;
4013 u32 cur_size;
4014 struct aac_hba_sgl *sge;
4015
4016 nseg = scsi_dma_map(scsicmd);
4017 if (nseg <= 0) {
4018 byte_count = nseg;
4019 goto out;
4020 }
4021
4022 if (nseg > HBA_MAX_SG_EMBEDDED)
4023 sge = &hbacmd->sge[2];
4024 else
4025 sge = &hbacmd->sge[0];
4026
4027 scsi_for_each_sg(scsicmd, sg, nseg, i) {
4028 int count = sg_dma_len(sg);
4029 u64 addr = sg_dma_address(sg);
4030
4031 WARN_ON(i >= sg_max);
4032 sge->addr_hi = cpu_to_le32((u32)(addr>>32));
4033 sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
4034 cur_size = cpu_to_le32(count);
4035 sge->len = cur_size;
4036 sge->flags = 0;
4037 byte_count += count;
4038 sge++;
4039 }
4040
4041 sge--;
4042 /* hba wants the size to be exact */
4043 if (byte_count > scsi_bufflen(scsicmd)) {
4044 u32 temp;
4045
 4046 temp = le32_to_cpu(sge->len) -
 4047 (byte_count - scsi_bufflen(scsicmd));
4048 sge->len = cpu_to_le32(temp);
4049 byte_count = scsi_bufflen(scsicmd);
4050 }
4051
4052 if (nseg <= HBA_MAX_SG_EMBEDDED) {
4053 hbacmd->emb_data_desc_count = cpu_to_le32(nseg);
4054 sge->flags = cpu_to_le32(0x40000000);
4055 } else {
4056 /* not embedded */
4057 hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
4058 hbacmd->emb_data_desc_count = (u8)cpu_to_le32(1);
4059 hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32);
4060 hbacmd->sge[0].addr_lo =
4061 cpu_to_le32((u32)(sg_address & 0xffffffff));
4062 }
4063
4064 /* Check for command underflow */
4065 if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
4066 pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n",
4067 byte_count, scsicmd->underflow);
4068 }
4069out:
4070 return byte_count;
4071}
4072
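[aac_build_sghba() supports two SGL layouts: up to HBA_MAX_SG_EMBEDDED (28) elements are stored directly in hbacmd->sge[], the last one tagged with 0x40000000, while longer lists are written from sge[2] onward and published through a single sge[0] descriptor flagged 0x80000000 (matching HBA_SGL_FLAGS_EXT in aacraid.h) whose address is the caller-supplied sg_address — presumably the DMA address of that separate SGL area. Two worked cases:

    /* nseg = 3  -> sge[0..2] filled, emb_data_desc_count = 3,
     *              sge[2].flags = 0x40000000 (last embedded element)
     * nseg = 40 -> elements written from sge[2] onward; sge[0] becomes one
     *              extended descriptor (flags = HBA_SGL_FLAGS_EXT) whose
     *              addr_lo/addr_hi carry sg_address */
]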
3437#ifdef AAC_DETAILED_STATUS_INFO 4073#ifdef AAC_DETAILED_STATUS_INFO
3438 4074
3439struct aac_srb_status_info { 4075struct aac_srb_status_info {
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index f059c14efa0c..f2344971e3cb 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1,3 +1,37 @@
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 * Module Name:
27 * aacraid.h
28 *
29 * Abstract: Contains all routines for control of the aacraid driver
30 *
31 */
32
33#ifndef _AACRAID_H_
34#define _AACRAID_H_
1#ifndef dprintk 35#ifndef dprintk
2# define dprintk(x) 36# define dprintk(x)
3#endif 37#endif
@@ -63,8 +97,8 @@ enum {
63#define PMC_GLOBAL_INT_BIT0 0x00000001 97#define PMC_GLOBAL_INT_BIT0 0x00000001
64 98
65#ifndef AAC_DRIVER_BUILD 99#ifndef AAC_DRIVER_BUILD
66# define AAC_DRIVER_BUILD 41066 100# define AAC_DRIVER_BUILD 50740
67# define AAC_DRIVER_BRANCH "-ms" 101# define AAC_DRIVER_BRANCH "-custom"
68#endif 102#endif
69#define MAXIMUM_NUM_CONTAINERS 32 103#define MAXIMUM_NUM_CONTAINERS 32
70 104
@@ -72,13 +106,311 @@ enum {
72#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB) 106#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB)
73#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB) 107#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
74 108
75#define AAC_MAX_LUN (8) 109#define AAC_MAX_LUN 256
76 110
77#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff) 111#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
78#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256) 112#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256)
79 113
80#define AAC_DEBUG_INSTRUMENT_AIF_DELETE 114#define AAC_DEBUG_INSTRUMENT_AIF_DELETE
81 115
116#define AAC_MAX_NATIVE_TARGETS 1024
117/* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
118#define AAC_MAX_BUSES 5
119#define AAC_MAX_TARGETS 256
120#define AAC_MAX_NATIVE_SIZE 2048
121#define FW_ERROR_BUFFER_SIZE 512
122
123/* Thor AIF events */
124#define SA_AIF_HOTPLUG (1<<1)
125#define SA_AIF_HARDWARE (1<<2)
126#define SA_AIF_PDEV_CHANGE (1<<4)
127#define SA_AIF_LDEV_CHANGE (1<<5)
128#define SA_AIF_BPSTAT_CHANGE (1<<30)
129#define SA_AIF_BPCFG_CHANGE (1<<31)
130
131#define HBA_MAX_SG_EMBEDDED 28
132#define HBA_MAX_SG_SEPARATE 90
133#define HBA_SENSE_DATA_LEN_MAX 32
134#define HBA_REQUEST_TAG_ERROR_FLAG 0x00000002
135#define HBA_SGL_FLAGS_EXT 0x80000000UL
136
137struct aac_hba_sgl {
138 u32 addr_lo; /* Lower 32-bits of SGL element address */
139 u32 addr_hi; /* Upper 32-bits of SGL element address */
140 u32 len; /* Length of SGL element in bytes */
141 u32 flags; /* SGL element flags */
142};
143
144enum {
145 HBA_IU_TYPE_SCSI_CMD_REQ = 0x40,
146 HBA_IU_TYPE_SCSI_TM_REQ = 0x41,
147 HBA_IU_TYPE_SATA_REQ = 0x42,
148 HBA_IU_TYPE_RESP = 0x60,
149 HBA_IU_TYPE_COALESCED_RESP = 0x61,
150 HBA_IU_TYPE_INT_COALESCING_CFG_REQ = 0x70
151};
152
153enum {
154 HBA_CMD_BYTE1_DATA_DIR_IN = 0x1,
155 HBA_CMD_BYTE1_DATA_DIR_OUT = 0x2,
156 HBA_CMD_BYTE1_DATA_TYPE_DDR = 0x4,
157 HBA_CMD_BYTE1_CRYPTO_ENABLE = 0x8
158};
159
160enum {
161 HBA_CMD_BYTE1_BITOFF_DATA_DIR_IN = 0x0,
162 HBA_CMD_BYTE1_BITOFF_DATA_DIR_OUT,
163 HBA_CMD_BYTE1_BITOFF_DATA_TYPE_DDR,
164 HBA_CMD_BYTE1_BITOFF_CRYPTO_ENABLE
165};
166
167enum {
168 HBA_RESP_DATAPRES_NO_DATA = 0x0,
169 HBA_RESP_DATAPRES_RESPONSE_DATA,
170 HBA_RESP_DATAPRES_SENSE_DATA
171};
172
173enum {
174 HBA_RESP_SVCRES_TASK_COMPLETE = 0x0,
175 HBA_RESP_SVCRES_FAILURE,
176 HBA_RESP_SVCRES_TMF_COMPLETE,
177 HBA_RESP_SVCRES_TMF_SUCCEEDED,
178 HBA_RESP_SVCRES_TMF_REJECTED,
179 HBA_RESP_SVCRES_TMF_LUN_INVALID
180};
181
182enum {
183 HBA_RESP_STAT_IO_ERROR = 0x1,
184 HBA_RESP_STAT_IO_ABORTED,
185 HBA_RESP_STAT_NO_PATH_TO_DEVICE,
186 HBA_RESP_STAT_INVALID_DEVICE,
187 HBA_RESP_STAT_HBAMODE_DISABLED = 0xE,
188 HBA_RESP_STAT_UNDERRUN = 0x51,
189 HBA_RESP_STAT_OVERRUN = 0x75
190};
191
192struct aac_hba_cmd_req {
193 u8 iu_type; /* HBA information unit type */
194 /*
195 * byte1:
196 * [1:0] DIR - 0=No data, 0x1 = IN, 0x2 = OUT
197 * [2] TYPE - 0=PCI, 1=DDR
198 * [3] CRYPTO_ENABLE - 0=Crypto disabled, 1=Crypto enabled
199 */
200 u8 byte1;
201 u8 reply_qid; /* Host reply queue to post response to */
202 u8 reserved1;
203 __le32 it_nexus; /* Device handle for the request */
204 __le32 request_id; /* Sender context */
205 /* Lower 32-bits of tweak value for crypto enabled IOs */
206 __le32 tweak_value_lo;
207 u8 cdb[16]; /* SCSI CDB of the command */
208 u8 lun[8]; /* SCSI LUN of the command */
209
210 /* Total data length in bytes to be read/written (if any) */
211 __le32 data_length;
212
213 /* [2:0] Task Attribute, [6:3] Command Priority */
214 u8 attr_prio;
215
216 /* Number of SGL elements embedded in the HBA req */
217 u8 emb_data_desc_count;
218
219 __le16 dek_index; /* DEK index for crypto enabled IOs */
220
221 /* Lower 32-bits of reserved error data target location on the host */
222 __le32 error_ptr_lo;
223
224 /* Upper 32-bits of reserved error data target location on the host */
225 __le32 error_ptr_hi;
226
227 /* Length of reserved error data area on the host in bytes */
228 __le32 error_length;
229
230 /* Upper 32-bits of tweak value for crypto enabled IOs */
231 __le32 tweak_value_hi;
232
233 struct aac_hba_sgl sge[HBA_MAX_SG_SEPARATE+2]; /* SG list space */
234
235 /*
236 * structure must not exceed
237 * AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE
238 */
239};
240
241/* Task Management Functions (TMF) */
242#define HBA_TMF_ABORT_TASK 0x01
243#define HBA_TMF_LUN_RESET 0x08
244
245struct aac_hba_tm_req {
246 u8 iu_type; /* HBA information unit type */
247 u8 reply_qid; /* Host reply queue to post response to */
248 u8 tmf; /* Task management function */
249 u8 reserved1;
250
251 __le32 it_nexus; /* Device handle for the command */
252
253 u8 lun[8]; /* SCSI LUN */
254
255 /* Used to hold sender context. */
256 __le32 request_id; /* Sender context */
257 __le32 reserved2;
258
259 /* Request identifier of managed task */
260 __le32 managed_request_id; /* Sender context being managed */
261 __le32 reserved3;
262
263 /* Lower 32-bits of reserved error data target location on the host */
264 __le32 error_ptr_lo;
265 /* Upper 32-bits of reserved error data target location on the host */
266 __le32 error_ptr_hi;
267 /* Length of reserved error data area on the host in bytes */
268 __le32 error_length;
269};
270
271struct aac_hba_reset_req {
272 u8 iu_type; /* HBA information unit type */
273 /* 0 - reset specified device, 1 - reset all devices */
274 u8 reset_type;
275 u8 reply_qid; /* Host reply queue to post response to */
276 u8 reserved1;
277
278 __le32 it_nexus; /* Device handle for the command */
279 __le32 request_id; /* Sender context */
280 /* Lower 32-bits of reserved error data target location on the host */
281 __le32 error_ptr_lo;
282 /* Upper 32-bits of reserved error data target location on the host */
283 __le32 error_ptr_hi;
284 /* Length of reserved error data area on the host in bytes */
285 __le32 error_length;
286};
287
288struct aac_hba_resp {
289 u8 iu_type; /* HBA information unit type */
290 u8 reserved1[3];
291 __le32 request_identifier; /* sender context */
292 __le32 reserved2;
293 u8 service_response; /* SCSI service response */
294 u8 status; /* SCSI status */
295 u8 datapres; /* [1:0] - data present, [7:2] - reserved */
296 u8 sense_response_data_len; /* Sense/response data length */
297 __le32 residual_count; /* Residual data length in bytes */
298 /* Sense/response data */
299 u8 sense_response_buf[HBA_SENSE_DATA_LEN_MAX];
300};
301
302struct aac_native_hba {
303 union {
304 struct aac_hba_cmd_req cmd;
305 struct aac_hba_tm_req tmr;
306 u8 cmd_bytes[AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE];
307 } cmd;
308 union {
309 struct aac_hba_resp err;
310 u8 resp_bytes[FW_ERROR_BUFFER_SIZE];
311 } resp;
312};
313
314#define CISS_REPORT_PHYSICAL_LUNS 0xc3
315#define WRITE_HOST_WELLNESS 0xa5
316#define CISS_IDENTIFY_PHYSICAL_DEVICE 0x15
317#define BMIC_IN 0x26
318#define BMIC_OUT 0x27
319
320struct aac_ciss_phys_luns_resp {
321 u8 list_length[4]; /* LUN list length (N-7, big endian) */
322 u8 resp_flag; /* extended response_flag */
323 u8 reserved[3];
324 struct _ciss_lun {
325 u8 tid[3]; /* Target ID */
326 u8 bus; /* Bus, flag (bits 6,7) */
327 u8 level3[2];
328 u8 level2[2];
329 u8 node_ident[16]; /* phys. node identifier */
330 } lun[1]; /* List of phys. devices */
331};
332
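[Because list_length is stored as raw big-endian bytes, a consumer of aac_ciss_phys_luns_resp has to assemble it by hand before dividing by the 24-byte entry size. A sketch, assuming the (N-7) length convention noted in the field comment:

    u32 len = (resp->list_length[0] << 24) | (resp->list_length[1] << 16) |
              (resp->list_length[2] << 8)  |  resp->list_length[3];
    u32 nluns = len / sizeof(struct _ciss_lun);  /* 24 bytes per entry */
]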
333/*
334 * Interrupts
335 */
336#define AAC_MAX_HRRQ 64
337
338struct aac_ciss_identify_pd {
339 u8 scsi_bus; /* SCSI Bus number on controller */
340 u8 scsi_id; /* SCSI ID on this bus */
341 u16 block_size; /* sector size in bytes */
 342 u32 total_blocks; /* number of sectors on drive */
343 u32 reserved_blocks; /* controller reserved (RIS) */
344 u8 model[40]; /* Physical Drive Model */
345 u8 serial_number[40]; /* Drive Serial Number */
346 u8 firmware_revision[8]; /* drive firmware revision */
347 u8 scsi_inquiry_bits; /* inquiry byte 7 bits */
348 u8 compaq_drive_stamp; /* 0 means drive not stamped */
349 u8 last_failure_reason;
350
351 u8 flags;
352 u8 more_flags;
353 u8 scsi_lun; /* SCSI LUN for phys drive */
354 u8 yet_more_flags;
355 u8 even_more_flags;
356 u32 spi_speed_rules; /* SPI Speed :Ultra disable diagnose */
357 u8 phys_connector[2]; /* connector number on controller */
 358 u8 phys_box_on_bus; /* phys enclosure this drive resides in */
 359 u8 phys_bay_in_box; /* phys drv bay this drive resides in */
360 u32 rpm; /* Drive rotational speed in rpm */
361 u8 device_type; /* type of drive */
362 u8 sata_version; /* only valid when drive_type is SATA */
363 u64 big_total_block_count;
364 u64 ris_starting_lba;
365 u32 ris_size;
366 u8 wwid[20];
367 u8 controller_phy_map[32];
368 u16 phy_count;
369 u8 phy_connected_dev_type[256];
370 u8 phy_to_drive_bay_num[256];
371 u16 phy_to_attached_dev_index[256];
372 u8 box_index;
373 u8 spitfire_support;
374 u16 extra_physical_drive_flags;
375 u8 negotiated_link_rate[256];
376 u8 phy_to_phy_map[256];
377 u8 redundant_path_present_map;
378 u8 redundant_path_failure_map;
379 u8 active_path_number;
380 u16 alternate_paths_phys_connector[8];
381 u8 alternate_paths_phys_box_on_port[8];
382 u8 multi_lun_device_lun_count;
383 u8 minimum_good_fw_revision[8];
384 u8 unique_inquiry_bytes[20];
385 u8 current_temperature_degreesC;
386 u8 temperature_threshold_degreesC;
387 u8 max_temperature_degreesC;
388 u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512 * 2^exp */
389 u16 current_queue_depth_limit;
390 u8 switch_name[10];
391 u16 switch_port;
392 u8 alternate_paths_switch_name[40];
393 u8 alternate_paths_switch_port[8];
394 u16 power_on_hours; /* valid only if gas gauge supported */
395 u16 percent_endurance_used; /* valid only if gas gauge supported. */
396 u8 drive_authentication;
397 u8 smart_carrier_authentication;
398 u8 smart_carrier_app_fw_version;
399 u8 smart_carrier_bootloader_fw_version;
400 u8 SanitizeSecureEraseSupport;
401 u8 DriveKeyFlags;
402 u8 encryption_key_name[64];
403 u32 misc_drive_flags;
404 u16 dek_index;
405 u16 drive_encryption_flags;
406 u8 sanitize_maximum_time[6];
407 u8 connector_info_mode;
408 u8 connector_info_number[4];
409 u8 long_connector_name[64];
410 u8 device_unique_identifier[16];
411 u8 padto_2K[17];
412} __packed;
413
82/* 414/*
83 * These macros convert from physical channels to virtual channels 415 * These macros convert from physical channels to virtual channels
84 */ 416 */
@@ -86,6 +418,7 @@ enum {
86#define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL) 418#define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL)
87#define CONTAINER_TO_ID(cont) (cont) 419#define CONTAINER_TO_ID(cont) (cont)
88#define CONTAINER_TO_LUN(cont) (0) 420#define CONTAINER_TO_LUN(cont) (0)
421#define ENCLOSURE_CHANNEL (3)
89 422
90#define PMC_DEVICE_S6 0x28b 423#define PMC_DEVICE_S6 0x28b
91#define PMC_DEVICE_S7 0x28c 424#define PMC_DEVICE_S7 0x28c
@@ -351,10 +684,10 @@ enum aac_queue_types {
351 684
352/* transport FIB header (PMC) */ 685/* transport FIB header (PMC) */
353struct aac_fib_xporthdr { 686struct aac_fib_xporthdr {
354 u64 HostAddress; /* FIB host address w/o xport header */ 687 __le64 HostAddress; /* FIB host address w/o xport header */
355 u32 Size; /* FIB size excluding xport header */ 688 __le32 Size; /* FIB size excluding xport header */
356 u32 Handle; /* driver handle to reference the FIB */ 689 __le32 Handle; /* driver handle to reference the FIB */
357 u64 Reserved[2]; 690 __le64 Reserved[2];
358}; 691};
359 692
360#define ALIGN32 32 693#define ALIGN32 32
@@ -379,7 +712,7 @@ struct aac_fibhdr {
379 __le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */ 712 __le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */
380 __le32 TimeStamp; /* otherwise timestamp for FW internal use */ 713 __le32 TimeStamp; /* otherwise timestamp for FW internal use */
381 } u; 714 } u;
382 u32 Handle; /* FIB handle used for MSGU commnunication */ 715 __le32 Handle; /* FIB handle used for MSGU communication */
383 u32 Previous; /* FW internal use */ 716 u32 Previous; /* FW internal use */
384 u32 Next; /* FW internal use */ 717 u32 Next; /* FW internal use */
385}; 718};
@@ -489,41 +822,64 @@ enum fib_xfer_state {
489#define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science 822#define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science
490#define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */ 823#define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */
491#define ADAPTER_INIT_STRUCT_REVISION_7 7 /* Denali */ 824#define ADAPTER_INIT_STRUCT_REVISION_7 7 /* Denali */
825#define ADAPTER_INIT_STRUCT_REVISION_8 8 // Thor
492 826
493struct aac_init 827union aac_init
494{ 828{
495 __le32 InitStructRevision; 829 struct _r7 {
496 __le32 Sa_MSIXVectors; 830 __le32 init_struct_revision;
497 __le32 fsrev; 831 __le32 no_of_msix_vectors;
498 __le32 CommHeaderAddress; 832 __le32 fsrev;
499 __le32 FastIoCommAreaAddress; 833 __le32 comm_header_address;
500 __le32 AdapterFibsPhysicalAddress; 834 __le32 fast_io_comm_area_address;
501 __le32 AdapterFibsVirtualAddress; 835 __le32 adapter_fibs_physical_address;
502 __le32 AdapterFibsSize; 836 __le32 adapter_fibs_virtual_address;
503 __le32 AdapterFibAlign; 837 __le32 adapter_fibs_size;
504 __le32 printfbuf; 838 __le32 adapter_fib_align;
505 __le32 printfbufsiz; 839 __le32 printfbuf;
506 __le32 HostPhysMemPages; /* number of 4k pages of host 840 __le32 printfbufsiz;
507 physical memory */ 841 /* number of 4k pages of host phys. mem. */
508 __le32 HostElapsedSeconds; /* number of seconds since 1970. */ 842 __le32 host_phys_mem_pages;
509 /* 843 /* number of seconds since 1970. */
510 * ADAPTER_INIT_STRUCT_REVISION_4 begins here 844 __le32 host_elapsed_seconds;
511 */ 845 /* ADAPTER_INIT_STRUCT_REVISION_4 begins here */
512 __le32 InitFlags; /* flags for supported features */ 846 __le32 init_flags; /* flags for supported features */
513#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001 847#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001
514#define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010 848#define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010
515#define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020 849#define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020
516#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000040 850#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000040
517#define INITFLAGS_FAST_JBOD_SUPPORTED 0x00000080 851#define INITFLAGS_FAST_JBOD_SUPPORTED 0x00000080
518#define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED 0x00000100 852#define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED 0x00000100
519 __le32 MaxIoCommands; /* max outstanding commands */ 853#define INITFLAGS_DRIVER_SUPPORTS_HBA_MODE 0x00000400
520 __le32 MaxIoSize; /* largest I/O command */ 854 __le32 max_io_commands; /* max outstanding commands */
521 __le32 MaxFibSize; /* largest FIB to adapter */ 855 __le32 max_io_size; /* largest I/O command */
522 /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */ 856 __le32 max_fib_size; /* largest FIB to adapter */
523 __le32 MaxNumAif; /* max number of aif */ 857 /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
524 /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */ 858 __le32 max_num_aif; /* max number of aif */
525 __le32 HostRRQ_AddrLow; 859 /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
526 __le32 HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */ 860 /* Host RRQ (response queue) for SRC */
861 __le32 host_rrq_addr_low;
862 __le32 host_rrq_addr_high;
863 } r7;
864 struct _r8 {
865 /* ADAPTER_INIT_STRUCT_REVISION_8 */
866 __le32 init_struct_revision;
867 __le32 rr_queue_count;
868 __le32 host_elapsed_seconds; /* number of secs since 1970. */
869 __le32 init_flags;
870 __le32 max_io_size; /* largest I/O command */
871 __le32 max_num_aif; /* max number of aif */
872 __le32 reserved1;
873 __le32 reserved2;
874 struct _rrq {
875 __le32 host_addr_low;
876 __le32 host_addr_high;
877 __le16 msix_id;
878 __le16 element_count;
879 __le16 comp_thresh;
880 __le16 unused;
881 } rrq[1]; /* up to 64 RRQ addresses */
882 } r8;
527}; 883};
528 884
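[In the revision-8 form the per-vector RRQ descriptors follow the fixed header inline; rrq[1] is a one-element placeholder for up to AAC_MAX_HRRQ (64) entries, so the init buffer handed to the adapter presumably has to be sized for the actual vector count, along the lines of:

    size_t init_size = sizeof(struct _r8) +
                       (nvec - 1) * sizeof(struct _rrq);  /* rrq[1] counted once */
]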
529enum aac_log_level { 885enum aac_log_level {
@@ -554,7 +910,7 @@ struct adapter_ops
554 void (*adapter_enable_int)(struct aac_dev *dev); 910 void (*adapter_enable_int)(struct aac_dev *dev);
555 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); 911 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
556 int (*adapter_check_health)(struct aac_dev *dev); 912 int (*adapter_check_health)(struct aac_dev *dev);
557 int (*adapter_restart)(struct aac_dev *dev, int bled); 913 int (*adapter_restart)(struct aac_dev *dev, int bled, u8 reset_type);
558 void (*adapter_start)(struct aac_dev *dev); 914 void (*adapter_start)(struct aac_dev *dev);
559 /* Transport operations */ 915 /* Transport operations */
560 int (*adapter_ioremap)(struct aac_dev * dev, u32 size); 916 int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
@@ -727,6 +1083,7 @@ struct sa_registers {
727 1083
728 1084
729#define SA_INIT_NUM_MSIXVECTORS 1 1085#define SA_INIT_NUM_MSIXVECTORS 1
1086#define SA_MINIPORT_REVISION SA_INIT_NUM_MSIXVECTORS
730 1087
731#define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) 1088#define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
732#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) 1089#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
@@ -820,32 +1177,37 @@ struct rkt_registers {
820#define src_inbound rx_inbound 1177#define src_inbound rx_inbound
821 1178
822struct src_mu_registers { 1179struct src_mu_registers {
823 /* PCI*| Name */ 1180 /* PCI*| Name */
824 __le32 reserved0[6]; /* 00h | Reserved */ 1181 __le32 reserved0[6]; /* 00h | Reserved */
825 __le32 IOAR[2]; /* 18h | IOA->host interrupt register */ 1182 __le32 IOAR[2]; /* 18h | IOA->host interrupt register */
826 __le32 IDR; /* 20h | Inbound Doorbell Register */ 1183 __le32 IDR; /* 20h | Inbound Doorbell Register */
827 __le32 IISR; /* 24h | Inbound Int. Status Register */ 1184 __le32 IISR; /* 24h | Inbound Int. Status Register */
828 __le32 reserved1[3]; /* 28h | Reserved */ 1185 __le32 reserved1[3]; /* 28h | Reserved */
829 __le32 OIMR; /* 34h | Outbound Int. Mask Register */ 1186 __le32 OIMR; /* 34h | Outbound Int. Mask Register */
830 __le32 reserved2[25]; /* 38h | Reserved */ 1187 __le32 reserved2[25]; /* 38h | Reserved */
831 __le32 ODR_R; /* 9ch | Outbound Doorbell Read */ 1188 __le32 ODR_R; /* 9ch | Outbound Doorbell Read */
832 __le32 ODR_C; /* a0h | Outbound Doorbell Clear */ 1189 __le32 ODR_C; /* a0h | Outbound Doorbell Clear */
833 __le32 reserved3[6]; /* a4h | Reserved */ 1190 __le32 reserved3[3]; /* a4h | Reserved */
834 __le32 OMR; /* bch | Outbound Message Register */ 1191 __le32 SCR0; /* b0h | Scratchpad 0 */
1192 __le32 reserved4[2]; /* b4h | Reserved */
1193 __le32 OMR; /* bch | Outbound Message Register */
835 __le32 IQ_L; /* c0h | Inbound Queue (Low address) */ 1194 __le32 IQ_L; /* c0h | Inbound Queue (Low address) */
836 __le32 IQ_H; /* c4h | Inbound Queue (High address) */ 1195 __le32 IQ_H; /* c4h | Inbound Queue (High address) */
837 __le32 ODR_MSI; /* c8h | MSI register for sync./AIF */ 1196 __le32 ODR_MSI; /* c8h | MSI register for sync./AIF */
1197 __le32 reserved5; /* cch | Reserved */
1198 __le32 IQN_L; /* d0h | Inbound (native cmd) low */
1199 __le32 IQN_H; /* d4h | Inbound (native cmd) high */
838}; 1200};
839 1201
840struct src_registers { 1202struct src_registers {
841 struct src_mu_registers MUnit; /* 00h - cbh */ 1203 struct src_mu_registers MUnit; /* 00h - cbh */
842 union { 1204 union {
843 struct { 1205 struct {
844 __le32 reserved1[130789]; /* cch - 7fc5fh */ 1206 __le32 reserved1[130786]; /* d8h - 7fc5fh */
845 struct src_inbound IndexRegs; /* 7fc60h */ 1207 struct src_inbound IndexRegs; /* 7fc60h */
846 } tupelo; 1208 } tupelo;
847 struct { 1209 struct {
848 __le32 reserved1[973]; /* cch - fffh */ 1210 __le32 reserved1[970]; /* d8h - fffh */
849 struct src_inbound IndexRegs; /* 1000h */ 1211 struct src_inbound IndexRegs; /* 1000h */
850 } denali; 1212 } denali;
851 } u; 1213 } u;
@@ -930,6 +1292,7 @@ struct fsa_dev_info {
930 char devname[8]; 1292 char devname[8];
931 struct sense_data sense_data; 1293 struct sense_data sense_data;
932 u32 block_size; 1294 u32 block_size;
1295 u8 identifier[16];
933}; 1296};
934 1297
935struct fib { 1298struct fib {
@@ -958,8 +1321,30 @@ struct fib {
958 struct list_head fiblink; 1321 struct list_head fiblink;
959 void *data; 1322 void *data;
960 u32 vector_no; 1323 u32 vector_no;
961 struct hw_fib *hw_fib_va; /* Actual shared object */ 1324 struct hw_fib *hw_fib_va; /* also used for native */
962 dma_addr_t hw_fib_pa; /* physical address of hw_fib*/ 1325 dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
1326 dma_addr_t hw_sgl_pa; /* extra sgl for native */
1327 dma_addr_t hw_error_pa; /* error buffer for native */
1328 u32 hbacmd_size; /* cmd size for native */
1329};
1330
1331#define AAC_INIT 0
1332#define AAC_RESCAN 1
1333
1334#define AAC_DEVTYPE_RAID_MEMBER 1
1335#define AAC_DEVTYPE_ARC_RAW 2
1336#define AAC_DEVTYPE_NATIVE_RAW 3
1337#define AAC_EXPOSE_DISK 0
1338#define AAC_HIDE_DISK 3
1339
1340struct aac_hba_map_info {
1341 __le32 rmw_nexus; /* nexus for native HBA devices */
1342 u8 devtype; /* device type */
1343 u8 new_devtype;
1344 u8 reset_state; /* 0 - no reset, 1..x - */
1345 /* after xth TM LUN reset */
1346 u16 qd_limit;
 1347 u8 expose; /* whether to expose the disk or not */
963}; 1348};
964 1349
965/* 1350/*
@@ -1025,7 +1410,28 @@ struct aac_supplement_adapter_info
1025 /* StructExpansion == 1 */ 1410 /* StructExpansion == 1 */
1026 __le32 FeatureBits3; 1411 __le32 FeatureBits3;
1027 __le32 SupportedPerformanceModes; 1412 __le32 SupportedPerformanceModes;
1028 __le32 ReservedForFutureGrowth[80]; 1413 u8 HostBusType; /* uses HOST_BUS_TYPE_xxx defines */
1414 u8 HostBusWidth; /* actual width in bits or links */
1415 u16 HostBusSpeed; /* actual bus speed/link rate in MHz */
1416 u8 MaxRRCDrives; /* max. number of ITP-RRC drives/pool */
1417 u8 MaxDiskXtasks; /* max. possible num of DiskX Tasks */
1418
1419 u8 CpldVerLoaded;
1420 u8 CpldVerInFlash;
1421
1422 __le64 MaxRRCCapacity;
1423 __le32 CompiledMaxHistLogLevel;
1424 u8 CustomBoardName[12];
1425 u16 SupportedCntlrMode; /* identify supported controller mode */
1426 u16 ReservedForFuture16;
1427 __le32 SupportedOptions3; /* reserved for future options */
1428
1429 __le16 VirtDeviceBus; /* virt. SCSI device for Thor */
1430 __le16 VirtDeviceTarget;
1431 __le16 VirtDeviceLUN;
1432 __le16 Unused;
1433 __le32 ReservedForFutureGrowth[68];
1434
1029}; 1435};
1030#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010) 1436#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
1031#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000) 1437#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000)
@@ -1099,11 +1505,21 @@ struct aac_bus_info_response {
1099#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16) 1505#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16)
1100#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17) 1506#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17)
1101#define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18) 1507#define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18)
1508#define AAC_OPT_EXTENDED cpu_to_le32(1<<23)
1509#define AAC_OPT_NATIVE_HBA cpu_to_le32(1<<25)
1102#define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28) 1510#define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28)
1103#define AAC_OPT_NEW_COMM_TYPE2 cpu_to_le32(1<<29) 1511#define AAC_OPT_NEW_COMM_TYPE2 cpu_to_le32(1<<29)
1104#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30) 1512#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30)
1105#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31) 1513#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31)
1106 1514
1515#define AAC_COMM_PRODUCER 0
1516#define AAC_COMM_MESSAGE 1
1517#define AAC_COMM_MESSAGE_TYPE1 3
1518#define AAC_COMM_MESSAGE_TYPE2 4
1519#define AAC_COMM_MESSAGE_TYPE3 5
1520
1521#define AAC_EXTOPT_SA_FIRMWARE cpu_to_le32(1<<1)
1522
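AAC_OPT_EXTENDED (bit 23) gates the extended options word: only when it is set does status[4] of GET_ADAPTER_PROPERTIES carry bits such as AAC_EXTOPT_SA_FIRMWARE. The comminit.c hunk below tests exactly this pair; condensed here into a sketch (the wrapper function itself is hypothetical):

	static void aac_detect_sa_firmware(struct aac_dev *dev, u32 *status)
	{
		/* status[] is filled by aac_adapter_sync_cmd(GET_ADAPTER_PROPERTIES, ...) */
		if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
		    (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
			dev->sa_firmware = 1;
		else
			dev->sa_firmware = 0;
	}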
1107/* MSIX context */ 1523/* MSIX context */
1108struct aac_msix_ctx { 1524struct aac_msix_ctx {
1109 int vector_no; 1525 int vector_no;
@@ -1119,15 +1535,17 @@ struct aac_dev
1119 /* 1535 /*
1120 * negotiated FIB settings 1536 * negotiated FIB settings
1121 */ 1537 */
1122 unsigned max_fib_size; 1538 unsigned int max_fib_size;
1123 unsigned sg_tablesize; 1539 unsigned int sg_tablesize;
1124 unsigned max_num_aif; 1540 unsigned int max_num_aif;
1541
1542 unsigned int max_cmd_size; /* max_fib_size or MAX_NATIVE */
1125 1543
1126 /* 1544 /*
1127 * Map for 128 fib objects (64k) 1545 * Map for 128 fib objects (64k)
1128 */ 1546 */
1129 dma_addr_t hw_fib_pa; 1547 dma_addr_t hw_fib_pa; /* also used for native cmd */
1130 struct hw_fib *hw_fib_va; 1548 struct hw_fib *hw_fib_va; /* also used for native cmd */
1131 struct hw_fib *aif_base_va; 1549 struct hw_fib *aif_base_va;
1132 /* 1550 /*
1133 * Fib Headers 1551 * Fib Headers
@@ -1157,21 +1575,23 @@ struct aac_dev
1157 1575
1158 resource_size_t base_size, dbg_size; /* Size of 1576 resource_size_t base_size, dbg_size; /* Size of
1159 * mapped in region */ 1577 * mapped in region */
1160 1578 /*
1161 struct aac_init *init; /* Holds initialization info to communicate with adapter */ 1579 * Holds initialization info
1580 * to communicate with adapter
1581 */
1582 union aac_init *init;
1162 dma_addr_t init_pa; /* Holds physical address of the init struct */ 1583 dma_addr_t init_pa; /* Holds physical address of the init struct */
1163 1584 /* response queue (if AAC_COMM_MESSAGE_TYPE1) */
1164 u32 *host_rrq; /* response queue 1585 __le32 *host_rrq;
1165 * if AAC_COMM_MESSAGE_TYPE1 */
1166
1167 dma_addr_t host_rrq_pa; /* phys. address */ 1586 dma_addr_t host_rrq_pa; /* phys. address */
1168 /* index into rrq buffer */ 1587 /* index into rrq buffer */
1169 u32 host_rrq_idx[AAC_MAX_MSIX]; 1588 u32 host_rrq_idx[AAC_MAX_MSIX];
1170 atomic_t rrq_outstanding[AAC_MAX_MSIX]; 1589 atomic_t rrq_outstanding[AAC_MAX_MSIX];
1171 u32 fibs_pushed_no; 1590 u32 fibs_pushed_no;
1172 struct pci_dev *pdev; /* Our PCI interface */ 1591 struct pci_dev *pdev; /* Our PCI interface */
1173 void * printfbuf; /* pointer to buffer used for printf's from the adapter */ 1592 /* pointer to buffer used for printf's from the adapter */
1174 void * comm_addr; /* Base address of Comm area */ 1593 void *printfbuf;
1594 void *comm_addr; /* Base address of Comm area */
1175 dma_addr_t comm_phys; /* Physical Address of Comm area */ 1595 dma_addr_t comm_phys; /* Physical Address of Comm area */
1176 size_t comm_size; 1596 size_t comm_size;
1177 1597
@@ -1227,15 +1647,12 @@ struct aac_dev
1227 u8 needs_dac; 1647 u8 needs_dac;
1228 u8 raid_scsi_mode; 1648 u8 raid_scsi_mode;
1229 u8 comm_interface; 1649 u8 comm_interface;
1230# define AAC_COMM_PRODUCER 0
1231# define AAC_COMM_MESSAGE 1
1232# define AAC_COMM_MESSAGE_TYPE1 3
1233# define AAC_COMM_MESSAGE_TYPE2 4
1234 u8 raw_io_interface; 1650 u8 raw_io_interface;
1235 u8 raw_io_64; 1651 u8 raw_io_64;
1236 u8 printf_enabled; 1652 u8 printf_enabled;
1237 u8 in_reset; 1653 u8 in_reset;
1238 u8 msi; 1654 u8 msi;
1655 u8 sa_firmware;
1239 int management_fib_count; 1656 int management_fib_count;
1240 spinlock_t manage_lock; 1657 spinlock_t manage_lock;
1241 spinlock_t sync_lock; 1658 spinlock_t sync_lock;
@@ -1246,7 +1663,10 @@ struct aac_dev
1246 u32 max_msix; /* max. MSI-X vectors */ 1663 u32 max_msix; /* max. MSI-X vectors */
1247 u32 vector_cap; /* MSI-X vector capab.*/ 1664 u32 vector_cap; /* MSI-X vector capab.*/
1248 int msi_enabled; /* MSI/MSI-X enabled */ 1665 int msi_enabled; /* MSI/MSI-X enabled */
1666 atomic_t msix_counter;
1667 struct msix_entry msixentry[AAC_MAX_MSIX];
1249 struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */ 1668 struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
1669 struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
1250 u8 adapter_shutdown; 1670 u8 adapter_shutdown;
1251 u32 handle_pci_error; 1671 u32 handle_pci_error;
1252}; 1672};
@@ -1269,8 +1689,8 @@ struct aac_dev
1269#define aac_adapter_check_health(dev) \ 1689#define aac_adapter_check_health(dev) \
1270 (dev)->a_ops.adapter_check_health(dev) 1690 (dev)->a_ops.adapter_check_health(dev)
1271 1691
1272#define aac_adapter_restart(dev,bled) \ 1692#define aac_adapter_restart(dev, bled, reset_type) \
1273 (dev)->a_ops.adapter_restart(dev,bled) 1693 ((dev)->a_ops.adapter_restart(dev, bled, reset_type))
1274 1694
1275#define aac_adapter_start(dev) \ 1695#define aac_adapter_start(dev) \
1276 ((dev)->a_ops.adapter_start(dev)) 1696 ((dev)->a_ops.adapter_start(dev))
@@ -1300,6 +1720,8 @@ struct aac_dev
1300#define FIB_CONTEXT_FLAG (0x00000002) 1720#define FIB_CONTEXT_FLAG (0x00000002)
1301#define FIB_CONTEXT_FLAG_WAIT (0x00000004) 1721#define FIB_CONTEXT_FLAG_WAIT (0x00000004)
1302#define FIB_CONTEXT_FLAG_FASTRESP (0x00000008) 1722#define FIB_CONTEXT_FLAG_FASTRESP (0x00000008)
1723#define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010)
1724#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020)
1303 1725
1304/* 1726/*
1305 * Define the command values 1727 * Define the command values
@@ -1358,6 +1780,7 @@ struct aac_dev
1358#define ST_IO 5 1780#define ST_IO 5
1359#define ST_NXIO 6 1781#define ST_NXIO 6
1360#define ST_E2BIG 7 1782#define ST_E2BIG 7
1783#define ST_MEDERR 8
1361#define ST_ACCES 13 1784#define ST_ACCES 13
1362#define ST_EXIST 17 1785#define ST_EXIST 17
1363#define ST_XDEV 18 1786#define ST_XDEV 18
@@ -1715,6 +2138,8 @@ struct aac_fsinfo {
1715 2138
1716struct aac_blockdevinfo { 2139struct aac_blockdevinfo {
1717 __le32 block_size; 2140 __le32 block_size;
2141 __le32 logical_phys_map;
2142 u8 identifier[16];
1718}; 2143};
1719 2144
1720union aac_contentinfo { 2145union aac_contentinfo {
@@ -1940,6 +2365,15 @@ struct revision
1940#define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER) 2365#define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER)
1941#define FSACTL_GET_CONTAINERS 2131 2366#define FSACTL_GET_CONTAINERS 2131
1942#define FSACTL_SEND_LARGE_FIB CTL_CODE(2138, METHOD_BUFFERED) 2367#define FSACTL_SEND_LARGE_FIB CTL_CODE(2138, METHOD_BUFFERED)
2368#define FSACTL_RESET_IOP CTL_CODE(2140, METHOD_BUFFERED)
2369#define FSACTL_GET_HBA_INFO CTL_CODE(2150, METHOD_BUFFERED)
2370/* flags defined for IOP & HW SOFT RESET */
2371#define HW_IOP_RESET 0x01
2372#define HW_SOFT_RESET 0x02
2373#define IOP_HWSOFT_RESET (HW_IOP_RESET | HW_SOFT_RESET)
2374/* HW Soft Reset register offset */
2375#define IBW_SWR_OFFSET 0x4000
2376#define SOFT_RESET_TIME 60
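FSACTL_RESET_IOP is dispatched to aac_send_reset_adapter() in the commctrl.c hunk below: the handler copies in a single reset_type byte and hands it to aac_reset_adapter(). A hedged user-space sketch; the device node path is an assumption and the local struct merely mirrors what the handler reads:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	struct aac_reset_iop {
		unsigned char reset_type;	/* HW_IOP_RESET, HW_SOFT_RESET, or both */
	};

	static int request_iop_reset(const char *node)	/* e.g. "/dev/aac0" (assumed) */
	{
		struct aac_reset_iop reset = { .reset_type = IOP_HWSOFT_RESET };
		int fd, rc;

		fd = open(node, O_RDWR);
		if (fd < 0)
			return -1;
		rc = ioctl(fd, FSACTL_RESET_IOP, &reset);	/* code from aacraid.h */
		close(fd);
		return rc;
	}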
1943 2377
1944 2378
1945struct aac_common 2379struct aac_common
@@ -1958,6 +2392,8 @@ struct aac_common
1958#ifdef DBG 2392#ifdef DBG
1959 u32 FibsSent; 2393 u32 FibsSent;
1960 u32 FibRecved; 2394 u32 FibRecved;
2395 u32 NativeSent;
2396 u32 NativeRecved;
1961 u32 NoResponseSent; 2397 u32 NoResponseSent;
1962 u32 NoResponseRecved; 2398 u32 NoResponseRecved;
1963 u32 AsyncSent; 2399 u32 AsyncSent;
@@ -1969,6 +2405,56 @@ struct aac_common
1969 2405
1970extern struct aac_common aac_config; 2406extern struct aac_common aac_config;
1971 2407
2408/*
 2409 * This is for management ioctl purposes only.
2410 */
2411struct aac_hba_info {
2412
2413 u8 driver_name[50];
2414 u8 adapter_number;
2415 u8 system_io_bus_number;
2416 u8 device_number;
2417 u32 function_number;
2418 u32 vendor_id;
2419 u32 device_id;
2420 u32 sub_vendor_id;
2421 u32 sub_system_id;
2422 u32 mapped_base_address_size;
2423 u32 base_physical_address_high_part;
2424 u32 base_physical_address_low_part;
2425
2426 u32 max_command_size;
2427 u32 max_fib_size;
2428 u32 max_scatter_gather_from_os;
2429 u32 max_scatter_gather_to_fw;
2430 u32 max_outstanding_fibs;
2431
2432 u32 queue_start_threshold;
2433 u32 queue_dump_threshold;
2434 u32 max_io_size_queued;
2435 u32 outstanding_io;
2436
2437 u32 firmware_build_number;
2438 u32 bios_build_number;
2439 u32 driver_build_number;
2440 u32 serial_number_high_part;
2441 u32 serial_number_low_part;
2442 u32 supported_options;
2443 u32 feature_bits;
2444 u32 currentnumber_ports;
2445
2446 u8 new_comm_interface:1;
2447 u8 new_commands_supported:1;
2448 u8 disable_passthrough:1;
2449 u8 expose_non_dasd:1;
2450 u8 queue_allowed:1;
2451 u8 bled_check_enabled:1;
2452 u8 reserved1:1;
 2453 u8 reserved2:1;
2454
2455 u32 reserved3[10];
2456
2457};
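The FSACTL_GET_HBA_INFO handler added in commctrl.c below fills only the PCI identity fields before copying the whole structure out, and it never zeroes its local copy, so the remaining members arrive uninitialized from the handler's stack and should not be trusted. A hedged user-space sketch reading only the populated fields (fd is an already-open aacraid character device; the struct layout comes from this header):

	#include <stdio.h>
	#include <sys/ioctl.h>

	static void print_hba_identity(int fd)
	{
		struct aac_hba_info info;	/* layout from aacraid.h */

		if (ioctl(fd, FSACTL_GET_HBA_INFO, &info) != 0)
			return;
		printf("aac%d: PCI %04x:%04x (sub %04x:%04x)\n",
		       info.adapter_number, info.vendor_id, info.device_id,
		       info.sub_vendor_id, info.sub_system_id);
	}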
1972 2458
1973/* 2459/*
1974 * The following macro is used when sending and receiving FIBs. It is 2460 * The following macro is used when sending and receiving FIBs. It is
@@ -2096,9 +2582,10 @@ extern struct aac_common aac_config;
2096 2582
2097/* PMC NEW COMM: Request the event data */ 2583/* PMC NEW COMM: Request the event data */
2098#define AifReqEvent 200 2584#define AifReqEvent 200
2585#define AifRawDeviceRemove 203 /* RAW device deleted */
2586#define AifNativeDeviceAdd 204 /* native HBA device added */
2587#define AifNativeDeviceRemove 205 /* native HBA device removed */
2099 2588
2100/* RAW device deleted */
2101#define AifRawDeviceRemove 203
2102 2589
2103/* 2590/*
2104 * Adapter Initiated FIB command structures. Start with the adapter 2591 * Adapter Initiated FIB command structures. Start with the adapter
@@ -2131,6 +2618,8 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
2131 2618
2132int aac_acquire_irq(struct aac_dev *dev); 2619int aac_acquire_irq(struct aac_dev *dev);
2133void aac_free_irq(struct aac_dev *dev); 2620void aac_free_irq(struct aac_dev *dev);
2621int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan);
2622int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target);
2134const char *aac_driverinfo(struct Scsi_Host *); 2623const char *aac_driverinfo(struct Scsi_Host *);
2135void aac_fib_vector_assign(struct aac_dev *dev); 2624void aac_fib_vector_assign(struct aac_dev *dev);
2136struct fib *aac_fib_alloc(struct aac_dev *dev); 2625struct fib *aac_fib_alloc(struct aac_dev *dev);
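aac_report_phys_luns() presumably (re)builds the hba_map table; its int rescan parameter pairs naturally with the AAC_INIT/AAC_RESCAN constants defined earlier in this header. A hedged call sketch (the surrounding context is invented for illustration):

	/* probe-time enumeration */
	rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);

	/* after an AifNativeDeviceAdd/AifNativeDeviceRemove event */
	if (!rcode)
		rcode = aac_report_phys_luns(dev, fibptr, AAC_RESCAN);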
@@ -2141,9 +2630,12 @@ void aac_fib_free(struct fib * context);
2141void aac_fib_init(struct fib * context); 2630void aac_fib_init(struct fib * context);
2142void aac_printf(struct aac_dev *dev, u32 val); 2631void aac_printf(struct aac_dev *dev, u32 val);
2143int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt); 2632int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
2633int aac_hba_send(u8 command, struct fib *context,
2634 fib_callback callback, void *ctxt);
2144int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); 2635int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
2145void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); 2636void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
2146int aac_fib_complete(struct fib * context); 2637int aac_fib_complete(struct fib * context);
2638void aac_hba_callback(void *context, struct fib *fibptr);
2147#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data) 2639#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
2148struct aac_dev *aac_init_adapter(struct aac_dev *dev); 2640struct aac_dev *aac_init_adapter(struct aac_dev *dev);
2149void aac_src_access_devreg(struct aac_dev *dev, int mode); 2641void aac_src_access_devreg(struct aac_dev *dev, int mode);
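aac_hba_send() is the native-path counterpart of aac_fib_send(): instead of a FIB command it takes an IU type, and the request length comes from fib->hbacmd_size rather than a size argument. The raw-SRB ioctl below calls it with a NULL callback, which (as with aac_fib_send) appears to mean a synchronous wait; a fragment mirroring that call:

	/* hbacmd contents and srbfib->hbacmd_size are already set up */
	status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib, NULL, NULL);
	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;	/* interrupted; the fib may still complete */
		goto cleanup;
	}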
@@ -2169,7 +2661,7 @@ unsigned int aac_command_normal(struct aac_queue * q);
2169unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, 2661unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index,
2170 int isAif, int isFastResponse, 2662 int isAif, int isFastResponse,
2171 struct hw_fib *aif_fib); 2663 struct hw_fib *aif_fib);
2172int aac_reset_adapter(struct aac_dev * dev, int forced); 2664int aac_reset_adapter(struct aac_dev *dev, int forced, u8 reset_type);
2173int aac_check_health(struct aac_dev * dev); 2665int aac_check_health(struct aac_dev * dev);
2174int aac_command_thread(void *data); 2666int aac_command_thread(void *data);
2175int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); 2667int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
@@ -2183,7 +2675,6 @@ int aac_rx_select_comm(struct aac_dev *dev, int comm);
2183int aac_rx_deliver_producer(struct fib * fib); 2675int aac_rx_deliver_producer(struct fib * fib);
2184char * get_container_type(unsigned type); 2676char * get_container_type(unsigned type);
2185extern int numacb; 2677extern int numacb;
2186extern int acbsize;
2187extern char aac_driver_version[]; 2678extern char aac_driver_version[];
2188extern int startup_timeout; 2679extern int startup_timeout;
2189extern int aif_timeout; 2680extern int aif_timeout;
@@ -2194,3 +2685,4 @@ extern int aac_commit;
2194extern int update_interval; 2685extern int update_interval;
2195extern int check_interval; 2686extern int check_interval;
2196extern int aac_check_reset; 2687extern int aac_check_reset;
2688#endif
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index e1daff230c7d..614842a9eb07 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -6,7 +6,8 @@
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc. 8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -477,20 +478,24 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
477 struct fib* srbfib; 478 struct fib* srbfib;
478 int status; 479 int status;
479 struct aac_srb *srbcmd = NULL; 480 struct aac_srb *srbcmd = NULL;
481 struct aac_hba_cmd_req *hbacmd = NULL;
480 struct user_aac_srb *user_srbcmd = NULL; 482 struct user_aac_srb *user_srbcmd = NULL;
481 struct user_aac_srb __user *user_srb = arg; 483 struct user_aac_srb __user *user_srb = arg;
482 struct aac_srb_reply __user *user_reply; 484 struct aac_srb_reply __user *user_reply;
483 struct aac_srb_reply* reply; 485 u32 chn;
484 u32 fibsize = 0; 486 u32 fibsize = 0;
485 u32 flags = 0; 487 u32 flags = 0;
486 s32 rcode = 0; 488 s32 rcode = 0;
487 u32 data_dir; 489 u32 data_dir;
488 void __user *sg_user[32]; 490 void __user *sg_user[HBA_MAX_SG_EMBEDDED];
489 void *sg_list[32]; 491 void *sg_list[HBA_MAX_SG_EMBEDDED];
492 u32 sg_count[HBA_MAX_SG_EMBEDDED];
490 u32 sg_indx = 0; 493 u32 sg_indx = 0;
491 u32 byte_count = 0; 494 u32 byte_count = 0;
492 u32 actual_fibsize64, actual_fibsize = 0; 495 u32 actual_fibsize64, actual_fibsize = 0;
493 int i; 496 int i;
497 int is_native_device;
498 u64 address;
494 499
495 500
496 if (dev->in_reset) { 501 if (dev->in_reset) {
@@ -507,11 +512,6 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
507 if (!(srbfib = aac_fib_alloc(dev))) { 512 if (!(srbfib = aac_fib_alloc(dev))) {
508 return -ENOMEM; 513 return -ENOMEM;
509 } 514 }
510 aac_fib_init(srbfib);
511 /* raw_srb FIB is not FastResponseCapable */
512 srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);
513
514 srbcmd = (struct aac_srb*) fib_data(srbfib);
515 515
516 memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */ 516 memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
517 if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){ 517 if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
@@ -538,21 +538,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
538 goto cleanup; 538 goto cleanup;
539 } 539 }
540 540
541 user_reply = arg+fibsize;
542
543 flags = user_srbcmd->flags; /* from user in cpu order */ 541 flags = user_srbcmd->flags; /* from user in cpu order */
544 // Fix up srb for endian and force some values
545
546 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
547 srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
548 srbcmd->id = cpu_to_le32(user_srbcmd->id);
549 srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
550 srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
551 srbcmd->flags = cpu_to_le32(flags);
552 srbcmd->retry_limit = 0; // Obsolete parameter
553 srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
554 memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
555
556 switch (flags & (SRB_DataIn | SRB_DataOut)) { 542 switch (flags & (SRB_DataIn | SRB_DataOut)) {
557 case SRB_DataOut: 543 case SRB_DataOut:
558 data_dir = DMA_TO_DEVICE; 544 data_dir = DMA_TO_DEVICE;
@@ -568,7 +554,12 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
568 } 554 }
569 if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) { 555 if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
570 dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n", 556 dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
571 le32_to_cpu(srbcmd->sg.count))); 557 user_srbcmd->sg.count));
558 rcode = -EINVAL;
559 goto cleanup;
560 }
561 if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
562 dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n"));
572 rcode = -EINVAL; 563 rcode = -EINVAL;
573 goto cleanup; 564 goto cleanup;
574 } 565 }
@@ -588,13 +579,136 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
588 rcode = -EINVAL; 579 rcode = -EINVAL;
589 goto cleanup; 580 goto cleanup;
590 } 581 }
591 if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) { 582
592 dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n")); 583 chn = aac_logical_to_phys(user_srbcmd->channel);
593 rcode = -EINVAL; 584 if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
594 goto cleanup; 585 dev->hba_map[chn][user_srbcmd->id].devtype ==
586 AAC_DEVTYPE_NATIVE_RAW) {
587 is_native_device = 1;
588 hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
589 memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */
590
591 /* iu_type is a parameter of aac_hba_send */
592 switch (data_dir) {
593 case DMA_TO_DEVICE:
594 hbacmd->byte1 = 2;
595 break;
596 case DMA_FROM_DEVICE:
597 case DMA_BIDIRECTIONAL:
598 hbacmd->byte1 = 1;
599 break;
600 case DMA_NONE:
601 default:
602 break;
603 }
604 hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
605 hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;
606
607 /*
608 * we fill in reply_qid later in aac_src_deliver_message
609 * we fill in iu_type, request_id later in aac_hba_send
610 * we fill in emb_data_desc_count, data_length later
611 * in sg list build
612 */
613
614 memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));
615
616 address = (u64)srbfib->hw_error_pa;
617 hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
618 hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
619 hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
620 hbacmd->emb_data_desc_count =
621 cpu_to_le32(user_srbcmd->sg.count);
622 srbfib->hbacmd_size = 64 +
623 user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);
624
625 } else {
626 is_native_device = 0;
627 aac_fib_init(srbfib);
628
629 /* raw_srb FIB is not FastResponseCapable */
630 srbfib->hw_fib_va->header.XferState &=
631 ~cpu_to_le32(FastResponseCapable);
632
633 srbcmd = (struct aac_srb *) fib_data(srbfib);
634
635 // Fix up srb for endian and force some values
636
637 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
638 srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
639 srbcmd->id = cpu_to_le32(user_srbcmd->id);
640 srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
641 srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
642 srbcmd->flags = cpu_to_le32(flags);
643 srbcmd->retry_limit = 0; // Obsolete parameter
644 srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
645 memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
595 } 646 }
647
596 byte_count = 0; 648 byte_count = 0;
597 if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) { 649 if (is_native_device) {
650 struct user_sgmap *usg32 = &user_srbcmd->sg;
651 struct user_sgmap64 *usg64 =
652 (struct user_sgmap64 *)&user_srbcmd->sg;
653
654 for (i = 0; i < usg32->count; i++) {
655 void *p;
656 u64 addr;
657
658 sg_count[i] = (actual_fibsize64 == fibsize) ?
659 usg64->sg[i].count : usg32->sg[i].count;
660 if (sg_count[i] >
661 (dev->scsi_host_ptr->max_sectors << 9)) {
662 pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
663 i, sg_count[i],
664 dev->scsi_host_ptr->max_sectors << 9);
665 rcode = -EINVAL;
666 goto cleanup;
667 }
668
669 p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
670 if (!p) {
671 rcode = -ENOMEM;
672 goto cleanup;
673 }
674
675 if (actual_fibsize64 == fibsize) {
676 addr = (u64)usg64->sg[i].addr[0];
677 addr += ((u64)usg64->sg[i].addr[1]) << 32;
678 } else {
679 addr = (u64)usg32->sg[i].addr;
680 }
681
682 sg_user[i] = (void __user *)(uintptr_t)addr;
683 sg_list[i] = p; // save so we can clean up later
684 sg_indx = i;
685
686 if (flags & SRB_DataOut) {
687 if (copy_from_user(p, sg_user[i],
688 sg_count[i])) {
689 rcode = -EFAULT;
690 goto cleanup;
691 }
692 }
693 addr = pci_map_single(dev->pdev, p, sg_count[i],
694 data_dir);
695 hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
696 hbacmd->sge[i].addr_lo = cpu_to_le32(
697 (u32)(addr & 0xffffffff));
698 hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
699 hbacmd->sge[i].flags = 0;
700 byte_count += sg_count[i];
701 }
702
703 if (usg32->count > 0) /* embedded sglist */
704 hbacmd->sge[usg32->count-1].flags =
705 cpu_to_le32(0x40000000);
706 hbacmd->data_length = cpu_to_le32(byte_count);
707
708 status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
709 NULL, NULL);
710
711 } else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
598 struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg; 712 struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
599 struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; 713 struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
600 714
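In the native branch above, the scatter/gather list is embedded in the command itself: each element carries a 64-bit DMA address split into hi/lo words, the final element is tagged with bit 30 (the 0x40000000 flag), and data_length carries the summed byte count. The same bookkeeping, condensed into a hypothetical helper:

	static void aac_fill_embedded_sgl(struct aac_hba_cmd_req *hbacmd,
					  dma_addr_t *addr, u32 *len, u32 count)
	{
		u32 i, byte_count = 0;

		for (i = 0; i < count; i++) {
			hbacmd->sge[i].addr_hi = cpu_to_le32(upper_32_bits(addr[i]));
			hbacmd->sge[i].addr_lo = cpu_to_le32(lower_32_bits(addr[i]));
			hbacmd->sge[i].len = cpu_to_le32(len[i]);
			hbacmd->sge[i].flags = 0;
			byte_count += len[i];
		}
		if (count)	/* bit 30 marks the final embedded element */
			hbacmd->sge[count - 1].flags = cpu_to_le32(0x40000000);
		hbacmd->data_length = cpu_to_le32(byte_count);
	}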
@@ -606,7 +720,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
606 for (i = 0; i < upsg->count; i++) { 720 for (i = 0; i < upsg->count; i++) {
607 u64 addr; 721 u64 addr;
608 void* p; 722 void* p;
609 if (upsg->sg[i].count > 723
724 sg_count[i] = upsg->sg[i].count;
725 if (sg_count[i] >
610 ((dev->adapter_info.options & 726 ((dev->adapter_info.options &
611 AAC_OPT_NEW_COMM) ? 727 AAC_OPT_NEW_COMM) ?
612 (dev->scsi_host_ptr->max_sectors << 9) : 728 (dev->scsi_host_ptr->max_sectors << 9) :
@@ -615,10 +731,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
615 goto cleanup; 731 goto cleanup;
616 } 732 }
617 /* Does this really need to be GFP_DMA? */ 733 /* Does this really need to be GFP_DMA? */
618 p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA); 734 p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
619 if(!p) { 735 if(!p) {
620 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 736 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
621 upsg->sg[i].count,i,upsg->count)); 737 sg_count[i], i, upsg->count));
622 rcode = -ENOMEM; 738 rcode = -ENOMEM;
623 goto cleanup; 739 goto cleanup;
624 } 740 }
@@ -629,18 +745,20 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
629 sg_indx = i; 745 sg_indx = i;
630 746
631 if (flags & SRB_DataOut) { 747 if (flags & SRB_DataOut) {
632 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ 748 if (copy_from_user(p, sg_user[i],
749 sg_count[i])){
633 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); 750 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
634 rcode = -EFAULT; 751 rcode = -EFAULT;
635 goto cleanup; 752 goto cleanup;
636 } 753 }
637 } 754 }
638 addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir); 755 addr = pci_map_single(dev->pdev, p,
756 sg_count[i], data_dir);
639 757
640 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); 758 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
641 psg->sg[i].addr[1] = cpu_to_le32(addr>>32); 759 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
642 byte_count += upsg->sg[i].count; 760 byte_count += sg_count[i];
643 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); 761 psg->sg[i].count = cpu_to_le32(sg_count[i]);
644 } 762 }
645 } else { 763 } else {
646 struct user_sgmap* usg; 764 struct user_sgmap* usg;
@@ -657,7 +775,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
657 for (i = 0; i < usg->count; i++) { 775 for (i = 0; i < usg->count; i++) {
658 u64 addr; 776 u64 addr;
659 void* p; 777 void* p;
660 if (usg->sg[i].count > 778
779 sg_count[i] = usg->sg[i].count;
780 if (sg_count[i] >
661 ((dev->adapter_info.options & 781 ((dev->adapter_info.options &
662 AAC_OPT_NEW_COMM) ? 782 AAC_OPT_NEW_COMM) ?
663 (dev->scsi_host_ptr->max_sectors << 9) : 783 (dev->scsi_host_ptr->max_sectors << 9) :
@@ -667,10 +787,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
667 goto cleanup; 787 goto cleanup;
668 } 788 }
669 /* Does this really need to be GFP_DMA? */ 789 /* Does this really need to be GFP_DMA? */
670 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); 790 p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
671 if(!p) { 791 if(!p) {
672 dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 792 dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
673 usg->sg[i].count,i,usg->count)); 793 sg_count[i], i, usg->count));
674 kfree(usg); 794 kfree(usg);
675 rcode = -ENOMEM; 795 rcode = -ENOMEM;
676 goto cleanup; 796 goto cleanup;
@@ -680,19 +800,21 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
680 sg_indx = i; 800 sg_indx = i;
681 801
682 if (flags & SRB_DataOut) { 802 if (flags & SRB_DataOut) {
683 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ 803 if (copy_from_user(p, sg_user[i],
804 sg_count[i])) {
684 kfree (usg); 805 kfree (usg);
685 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); 806 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
686 rcode = -EFAULT; 807 rcode = -EFAULT;
687 goto cleanup; 808 goto cleanup;
688 } 809 }
689 } 810 }
690 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); 811 addr = pci_map_single(dev->pdev, p,
812 sg_count[i], data_dir);
691 813
692 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); 814 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
693 psg->sg[i].addr[1] = cpu_to_le32(addr>>32); 815 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
694 byte_count += usg->sg[i].count; 816 byte_count += sg_count[i];
695 psg->sg[i].count = cpu_to_le32(usg->sg[i].count); 817 psg->sg[i].count = cpu_to_le32(sg_count[i]);
696 } 818 }
697 kfree (usg); 819 kfree (usg);
698 } 820 }
@@ -711,7 +833,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
711 for (i = 0; i < upsg->count; i++) { 833 for (i = 0; i < upsg->count; i++) {
712 uintptr_t addr; 834 uintptr_t addr;
713 void* p; 835 void* p;
714 if (usg->sg[i].count > 836
837 sg_count[i] = usg->sg[i].count;
838 if (sg_count[i] >
715 ((dev->adapter_info.options & 839 ((dev->adapter_info.options &
716 AAC_OPT_NEW_COMM) ? 840 AAC_OPT_NEW_COMM) ?
717 (dev->scsi_host_ptr->max_sectors << 9) : 841 (dev->scsi_host_ptr->max_sectors << 9) :
@@ -720,10 +844,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
720 goto cleanup; 844 goto cleanup;
721 } 845 }
722 /* Does this really need to be GFP_DMA? */ 846 /* Does this really need to be GFP_DMA? */
723 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); 847 p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
724 if(!p) { 848 if (!p) {
725 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 849 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
726 usg->sg[i].count,i,usg->count)); 850 sg_count[i], i, usg->count));
727 rcode = -ENOMEM; 851 rcode = -ENOMEM;
728 goto cleanup; 852 goto cleanup;
729 } 853 }
@@ -734,7 +858,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
734 sg_indx = i; 858 sg_indx = i;
735 859
736 if (flags & SRB_DataOut) { 860 if (flags & SRB_DataOut) {
737 if(copy_from_user(p,sg_user[i],usg->sg[i].count)){ 861 if (copy_from_user(p, sg_user[i],
862 sg_count[i])){
738 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); 863 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
739 rcode = -EFAULT; 864 rcode = -EFAULT;
740 goto cleanup; 865 goto cleanup;
@@ -744,13 +869,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
744 869
745 psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff); 870 psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
746 byte_count += usg->sg[i].count; 871 byte_count += usg->sg[i].count;
747 psg->sg[i].count = cpu_to_le32(usg->sg[i].count); 872 psg->sg[i].count = cpu_to_le32(sg_count[i]);
748 } 873 }
749 } else { 874 } else {
750 for (i = 0; i < upsg->count; i++) { 875 for (i = 0; i < upsg->count; i++) {
751 dma_addr_t addr; 876 dma_addr_t addr;
752 void* p; 877 void* p;
753 if (upsg->sg[i].count > 878
879 sg_count[i] = upsg->sg[i].count;
880 if (sg_count[i] >
754 ((dev->adapter_info.options & 881 ((dev->adapter_info.options &
755 AAC_OPT_NEW_COMM) ? 882 AAC_OPT_NEW_COMM) ?
756 (dev->scsi_host_ptr->max_sectors << 9) : 883 (dev->scsi_host_ptr->max_sectors << 9) :
@@ -758,10 +885,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
758 rcode = -EINVAL; 885 rcode = -EINVAL;
759 goto cleanup; 886 goto cleanup;
760 } 887 }
761 p = kmalloc(upsg->sg[i].count, GFP_KERNEL); 888 p = kmalloc(sg_count[i], GFP_KERNEL);
762 if (!p) { 889 if (!p) {
763 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 890 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
764 upsg->sg[i].count, i, upsg->count)); 891 sg_count[i], i, upsg->count));
765 rcode = -ENOMEM; 892 rcode = -ENOMEM;
766 goto cleanup; 893 goto cleanup;
767 } 894 }
@@ -770,19 +897,19 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
770 sg_indx = i; 897 sg_indx = i;
771 898
772 if (flags & SRB_DataOut) { 899 if (flags & SRB_DataOut) {
773 if(copy_from_user(p, sg_user[i], 900 if (copy_from_user(p, sg_user[i],
774 upsg->sg[i].count)) { 901 sg_count[i])) {
775 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); 902 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
776 rcode = -EFAULT; 903 rcode = -EFAULT;
777 goto cleanup; 904 goto cleanup;
778 } 905 }
779 } 906 }
780 addr = pci_map_single(dev->pdev, p, 907 addr = pci_map_single(dev->pdev, p,
781 upsg->sg[i].count, data_dir); 908 sg_count[i], data_dir);
782 909
783 psg->sg[i].addr = cpu_to_le32(addr); 910 psg->sg[i].addr = cpu_to_le32(addr);
784 byte_count += upsg->sg[i].count; 911 byte_count += sg_count[i];
785 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); 912 psg->sg[i].count = cpu_to_le32(sg_count[i]);
786 } 913 }
787 } 914 }
788 srbcmd->count = cpu_to_le32(byte_count); 915 srbcmd->count = cpu_to_le32(byte_count);
@@ -792,12 +919,13 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
792 psg->count = 0; 919 psg->count = 0;
793 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 920 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
794 } 921 }
922
795 if (status == -ERESTARTSYS) { 923 if (status == -ERESTARTSYS) {
796 rcode = -ERESTARTSYS; 924 rcode = -ERESTARTSYS;
797 goto cleanup; 925 goto cleanup;
798 } 926 }
799 927
800 if (status != 0){ 928 if (status != 0) {
801 dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n")); 929 dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
802 rcode = -ENXIO; 930 rcode = -ENXIO;
803 goto cleanup; 931 goto cleanup;
@@ -805,11 +933,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
805 933
806 if (flags & SRB_DataIn) { 934 if (flags & SRB_DataIn) {
807 for(i = 0 ; i <= sg_indx; i++){ 935 for(i = 0 ; i <= sg_indx; i++){
808 byte_count = le32_to_cpu( 936 if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
809 (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
810 ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
811 : srbcmd->sg.sg[i].count);
812 if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
813 dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n")); 937 dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
814 rcode = -EFAULT; 938 rcode = -EFAULT;
815 goto cleanup; 939 goto cleanup;
@@ -818,19 +942,50 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
818 } 942 }
819 } 943 }
820 944
821 reply = (struct aac_srb_reply *) fib_data(srbfib); 945 user_reply = arg + fibsize;
822 if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){ 946 if (is_native_device) {
823 dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n")); 947 struct aac_hba_resp *err =
824 rcode = -EFAULT; 948 &((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
825 goto cleanup; 949 struct aac_srb_reply reply;
950
951 reply.status = ST_OK;
952 if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
953 /* fast response */
954 reply.srb_status = SRB_STATUS_SUCCESS;
955 reply.scsi_status = 0;
956 reply.data_xfer_length = byte_count;
957 } else {
958 reply.srb_status = err->service_response;
959 reply.scsi_status = err->status;
960 reply.data_xfer_length = byte_count -
961 le32_to_cpu(err->residual_count);
962 reply.sense_data_size = err->sense_response_data_len;
963 memcpy(reply.sense_data, err->sense_response_buf,
964 AAC_SENSE_BUFFERSIZE);
965 }
966 if (copy_to_user(user_reply, &reply,
967 sizeof(struct aac_srb_reply))) {
968 dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
969 rcode = -EFAULT;
970 goto cleanup;
971 }
972 } else {
973 struct aac_srb_reply *reply;
974
975 reply = (struct aac_srb_reply *) fib_data(srbfib);
976 if (copy_to_user(user_reply, reply,
977 sizeof(struct aac_srb_reply))) {
978 dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
979 rcode = -EFAULT;
980 goto cleanup;
981 }
826 } 982 }
827 983
828cleanup: 984cleanup:
829 kfree(user_srbcmd); 985 kfree(user_srbcmd);
830 for(i=0; i <= sg_indx; i++){
831 kfree(sg_list[i]);
832 }
833 if (rcode != -ERESTARTSYS) { 986 if (rcode != -ERESTARTSYS) {
987 for (i = 0; i <= sg_indx; i++)
988 kfree(sg_list[i]);
834 aac_fib_complete(srbfib); 989 aac_fib_complete(srbfib);
835 aac_fib_free(srbfib); 990 aac_fib_free(srbfib);
836 } 991 }
@@ -858,6 +1013,44 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
858 return 0; 1013 return 0;
859} 1014}
860 1015
1016static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
1017{
1018 struct aac_hba_info hbainfo;
1019
1020 hbainfo.adapter_number = (u8) dev->id;
1021 hbainfo.system_io_bus_number = dev->pdev->bus->number;
1022 hbainfo.device_number = (dev->pdev->devfn >> 3);
1023 hbainfo.function_number = (dev->pdev->devfn & 0x0007);
1024
1025 hbainfo.vendor_id = dev->pdev->vendor;
1026 hbainfo.device_id = dev->pdev->device;
1027 hbainfo.sub_vendor_id = dev->pdev->subsystem_vendor;
1028 hbainfo.sub_system_id = dev->pdev->subsystem_device;
1029
1030 if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
1031 dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
1032 return -EFAULT;
1033 }
1034
1035 return 0;
1036}
1037
1038struct aac_reset_iop {
1039 u8 reset_type;
1040};
1041
1042static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
1043{
1044 struct aac_reset_iop reset;
1045 int retval;
1046
1047 if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
1048 return -EFAULT;
1049
1050 retval = aac_reset_adapter(dev, 0, reset.reset_type);
1051 return retval;
1052
1053}
861 1054
862int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) 1055int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
863{ 1056{
@@ -901,6 +1094,13 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
901 case FSACTL_GET_PCI_INFO: 1094 case FSACTL_GET_PCI_INFO:
902 status = aac_get_pci_info(dev,arg); 1095 status = aac_get_pci_info(dev,arg);
903 break; 1096 break;
1097 case FSACTL_GET_HBA_INFO:
1098 status = aac_get_hba_info(dev, arg);
1099 break;
1100 case FSACTL_RESET_IOP:
1101 status = aac_send_reset_adapter(dev, arg);
1102 break;
1103
904 default: 1104 default:
905 status = -ENOTTY; 1105 status = -ENOTTY;
906 break; 1106 break;
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 5b48bedd7c38..40bfc57b6849 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -6,7 +6,8 @@
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc. 8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -72,104 +73,175 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
72 unsigned long size, align; 73 unsigned long size, align;
73 const unsigned long fibsize = dev->max_fib_size; 74 const unsigned long fibsize = dev->max_fib_size;
74 const unsigned long printfbufsiz = 256; 75 const unsigned long printfbufsiz = 256;
75 unsigned long host_rrq_size = 0; 76 unsigned long host_rrq_size, aac_init_size;
76 struct aac_init *init; 77 union aac_init *init;
77 dma_addr_t phys; 78 dma_addr_t phys;
78 unsigned long aac_max_hostphysmempages; 79 unsigned long aac_max_hostphysmempages;
79 80
80 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || 81 if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
81 dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) 82 (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
83 (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
84 !dev->sa_firmware)) {
85 host_rrq_size =
86 (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)
87 * sizeof(u32);
88 aac_init_size = sizeof(union aac_init);
89 } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
90 dev->sa_firmware) {
82 host_rrq_size = (dev->scsi_host_ptr->can_queue 91 host_rrq_size = (dev->scsi_host_ptr->can_queue
83 + AAC_NUM_MGT_FIB) * sizeof(u32); 92 + AAC_NUM_MGT_FIB) * sizeof(u32) * AAC_MAX_MSIX;
84 size = fibsize + sizeof(struct aac_init) + commsize + 93 aac_init_size = sizeof(union aac_init) +
85 commalign + printfbufsiz + host_rrq_size; 94 (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq);
86 95 } else {
96 host_rrq_size = 0;
97 aac_init_size = sizeof(union aac_init);
98 }
99 size = fibsize + aac_init_size + commsize + commalign +
100 printfbufsiz + host_rrq_size;
101
87 base = pci_alloc_consistent(dev->pdev, size, &phys); 102 base = pci_alloc_consistent(dev->pdev, size, &phys);
88 103
89 if(base == NULL) 104 if (base == NULL) {
90 {
91 printk(KERN_ERR "aacraid: unable to create mapping.\n"); 105 printk(KERN_ERR "aacraid: unable to create mapping.\n");
92 return 0; 106 return 0;
93 } 107 }
108
94 dev->comm_addr = (void *)base; 109 dev->comm_addr = (void *)base;
95 dev->comm_phys = phys; 110 dev->comm_phys = phys;
96 dev->comm_size = size; 111 dev->comm_size = size;
97 112
98 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || 113 if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
99 dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { 114 (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
115 (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) {
100 dev->host_rrq = (u32 *)(base + fibsize); 116 dev->host_rrq = (u32 *)(base + fibsize);
101 dev->host_rrq_pa = phys + fibsize; 117 dev->host_rrq_pa = phys + fibsize;
102 memset(dev->host_rrq, 0, host_rrq_size); 118 memset(dev->host_rrq, 0, host_rrq_size);
103 } 119 }
104 120
105 dev->init = (struct aac_init *)(base + fibsize + host_rrq_size); 121 dev->init = (union aac_init *)(base + fibsize + host_rrq_size);
106 dev->init_pa = phys + fibsize + host_rrq_size; 122 dev->init_pa = phys + fibsize + host_rrq_size;
107 123
108 init = dev->init; 124 init = dev->init;
109 125
110 init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); 126 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
111 if (dev->max_fib_size != sizeof(struct hw_fib)) 127 int i;
112 init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); 128 u64 addr;
113 init->Sa_MSIXVectors = cpu_to_le32(SA_INIT_NUM_MSIXVECTORS); 129
114 init->fsrev = cpu_to_le32(dev->fsrev); 130 init->r8.init_struct_revision =
131 cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_8);
132 init->r8.init_flags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
133 INITFLAGS_DRIVER_USES_UTC_TIME |
134 INITFLAGS_DRIVER_SUPPORTS_PM);
135 init->r8.init_flags |=
136 cpu_to_le32(INITFLAGS_DRIVER_SUPPORTS_HBA_MODE);
137 init->r8.rr_queue_count = cpu_to_le32(dev->max_msix);
138 init->r8.max_io_size =
139 cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
140 init->r8.max_num_aif = init->r8.reserved1 =
141 init->r8.reserved2 = 0;
142
143 for (i = 0; i < dev->max_msix; i++) {
144 addr = (u64)dev->host_rrq_pa + dev->vector_cap * i *
145 sizeof(u32);
146 init->r8.rrq[i].host_addr_high = cpu_to_le32(
147 upper_32_bits(addr));
148 init->r8.rrq[i].host_addr_low = cpu_to_le32(
149 lower_32_bits(addr));
150 init->r8.rrq[i].msix_id = i;
151 init->r8.rrq[i].element_count = cpu_to_le16(
152 (u16)dev->vector_cap);
153 init->r8.rrq[i].comp_thresh =
154 init->r8.rrq[i].unused = 0;
155 }
115 156
116 /* 157 pr_warn("aacraid: Comm Interface type3 enabled\n");
117 * Adapter Fibs are the first thing allocated so that they 158 } else {
118 * start page aligned 159 init->r7.init_struct_revision =
119 */ 160 cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
120 dev->aif_base_va = (struct hw_fib *)base; 161 if (dev->max_fib_size != sizeof(struct hw_fib))
121 162 init->r7.init_struct_revision =
122 init->AdapterFibsVirtualAddress = 0; 163 cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
123 init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys); 164 init->r7.no_of_msix_vectors = cpu_to_le32(SA_MINIPORT_REVISION);
124 init->AdapterFibsSize = cpu_to_le32(fibsize); 165 init->r7.fsrev = cpu_to_le32(dev->fsrev);
125 init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib)); 166
126 /* 167 /*
127 * number of 4k pages of host physical memory. The aacraid fw needs 168 * Adapter Fibs are the first thing allocated so that they
128 * this number to be less than 4gb worth of pages. New firmware doesn't 169 * start page aligned
129 * have any issues with the mapping system, but older Firmware did, and 170 */
130 * had *troubles* dealing with the math overloading past 32 bits, thus 171 dev->aif_base_va = (struct hw_fib *)base;
131 * we must limit this field. 172
132 */ 173 init->r7.adapter_fibs_virtual_address = 0;
133 aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12; 174 init->r7.adapter_fibs_physical_address = cpu_to_le32((u32)phys);
134 if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES) 175 init->r7.adapter_fibs_size = cpu_to_le32(fibsize);
135 init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages); 176 init->r7.adapter_fib_align = cpu_to_le32(sizeof(struct hw_fib));
136 else 177
137 init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES); 178 /*
138 179 * number of 4k pages of host physical memory. The aacraid fw
139 init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME | 180 * needs this number to be less than 4gb worth of pages. New
140 INITFLAGS_DRIVER_SUPPORTS_PM); 181 * firmware doesn't have any issues with the mapping system, but
141 init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); 182 * older Firmware did, and had *troubles* dealing with the math
142 init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9); 183 * overloading past 32 bits, thus we must limit this field.
143 init->MaxFibSize = cpu_to_le32(dev->max_fib_size); 184 */
144 init->MaxNumAif = cpu_to_le32(dev->max_num_aif); 185 aac_max_hostphysmempages =
145 186 dma_get_required_mask(&dev->pdev->dev) >> 12;
146 if (dev->comm_interface == AAC_COMM_MESSAGE) { 187 if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
147 init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED); 188 init->r7.host_phys_mem_pages =
148 dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n")); 189 cpu_to_le32(aac_max_hostphysmempages);
149 } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) { 190 else
150 init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6); 191 init->r7.host_phys_mem_pages =
151 init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | 192 cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
152 INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED); 193
153 init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32)); 194 init->r7.init_flags =
154 init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff)); 195 cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
155 dprintk((KERN_WARNING"aacraid: New Comm Interface type1 enabled\n")); 196 INITFLAGS_DRIVER_SUPPORTS_PM);
156 } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { 197 init->r7.max_io_commands =
157 init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7); 198 cpu_to_le32(dev->scsi_host_ptr->can_queue +
158 init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED | 199 AAC_NUM_MGT_FIB);
159 INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED); 200 init->r7.max_io_size =
160 init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32)); 201 cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
161 init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff)); 202 init->r7.max_fib_size = cpu_to_le32(dev->max_fib_size);
162 /* number of MSI-X */ 203 init->r7.max_num_aif = cpu_to_le32(dev->max_num_aif);
163 init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); 204
164 dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n")); 205 if (dev->comm_interface == AAC_COMM_MESSAGE) {
206 init->r7.init_flags |=
207 cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
208 pr_warn("aacraid: Comm Interface enabled\n");
209 } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
210 init->r7.init_struct_revision =
211 cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
212 init->r7.init_flags |=
213 cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
214 INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
215 INITFLAGS_FAST_JBOD_SUPPORTED);
216 init->r7.host_rrq_addr_high =
217 cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
218 init->r7.host_rrq_addr_low =
219 cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
220 pr_warn("aacraid: Comm Interface type1 enabled\n");
221 } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
222 init->r7.init_struct_revision =
223 cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
224 init->r7.init_flags |=
225 cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
226 INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
227 INITFLAGS_FAST_JBOD_SUPPORTED);
228 init->r7.host_rrq_addr_high =
229 cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
230 init->r7.host_rrq_addr_low =
231 cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
232 init->r7.no_of_msix_vectors =
233 cpu_to_le32(dev->max_msix);
234 /* must be the COMM_PREFERRED_SETTINGS values */
235 pr_warn("aacraid: Comm Interface type2 enabled\n");
236 }
165 } 237 }
166 238
167 /* 239 /*
168 * Increment the base address by the amount already used 240 * Increment the base address by the amount already used
169 */ 241 */
170 base = base + fibsize + host_rrq_size + sizeof(struct aac_init); 242 base = base + fibsize + host_rrq_size + aac_init_size;
171 phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size + 243 phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
172 sizeof(struct aac_init)); 244 aac_init_size);
173 245
174 /* 246 /*
175 * Align the beginning of Headers to commalign 247 * Align the beginning of Headers to commalign
@@ -181,7 +253,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
181 * Fill in addresses of the Comm Area Headers and Queues 253 * Fill in addresses of the Comm Area Headers and Queues
182 */ 254 */
183 *commaddr = base; 255 *commaddr = base;
184 init->CommHeaderAddress = cpu_to_le32((u32)phys); 256 if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
257 init->r7.comm_header_address = cpu_to_le32((u32)phys);
185 /* 258 /*
186 * Increment the base address by the size of the CommArea 259 * Increment the base address by the size of the CommArea
187 */ 260 */
@@ -191,12 +264,14 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
191 * Place the Printf buffer area after the Fast I/O comm area. 264 * Place the Printf buffer area after the Fast I/O comm area.
192 */ 265 */
193 dev->printfbuf = (void *)base; 266 dev->printfbuf = (void *)base;
194 init->printfbuf = cpu_to_le32(phys); 267 if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) {
195 init->printfbufsiz = cpu_to_le32(printfbufsiz); 268 init->r7.printfbuf = cpu_to_le32(phys);
269 init->r7.printfbufsiz = cpu_to_le32(printfbufsiz);
270 }
196 memset(base, 0, printfbufsiz); 271 memset(base, 0, printfbufsiz);
197 return 1; 272 return 1;
198} 273}
199 274
200static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize) 275static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
201{ 276{
202 atomic_set(&q->numpending, 0); 277 atomic_set(&q->numpending, 0);
@@ -404,9 +479,13 @@ void aac_define_int_mode(struct aac_dev *dev)
404 if (dev->max_msix > msi_count) 479 if (dev->max_msix > msi_count)
405 dev->max_msix = msi_count; 480 dev->max_msix = msi_count;
406 } 481 }
407 dev->vector_cap = 482 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && dev->sa_firmware)
408 (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) / 483 dev->vector_cap = dev->scsi_host_ptr->can_queue +
409 msi_count; 484 AAC_NUM_MGT_FIB;
485 else
486 dev->vector_cap = (dev->scsi_host_ptr->can_queue +
487 AAC_NUM_MGT_FIB) / msi_count;
488
410} 489}
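The meaning of vector_cap changes with sa_firmware: legacy interfaces split the outstanding-command budget evenly across the MSI-X vectors, while TYPE3 with sa_firmware sizes every per-vector response queue for the full budget (matching the host_rrq allocation in aac_alloc_comm() above, which multiplies by AAC_MAX_MSIX in that case). Illustrative arithmetic only; the concrete values below are assumptions:

	/* assume can_queue = 504, AAC_NUM_MGT_FIB = 8, msi_count = 8 */
	u32 budget = 504 + 8;		/* 512 commands outstanding overall */
	u32 legacy_cap = budget / 8;	/* 64 RRQ entries per vector */
	u32 sa_fw_cap = budget;		/* 512 entries in every vector's queue */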
411struct aac_dev *aac_init_adapter(struct aac_dev *dev) 490struct aac_dev *aac_init_adapter(struct aac_dev *dev)
412{ 491{
@@ -440,30 +519,37 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
440 519
441 if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 520 if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
442 0, 0, 0, 0, 0, 0, 521 0, 0, 0, 0, 0, 0,
443 status+0, status+1, status+2, status+3, NULL)) && 522 status+0, status+1, status+2, status+3, status+4)) &&
444 (status[0] == 0x00000001)) { 523 (status[0] == 0x00000001)) {
445 dev->doorbell_mask = status[3]; 524 dev->doorbell_mask = status[3];
446 if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) 525 if (status[1] & AAC_OPT_NEW_COMM_64)
447 dev->raw_io_64 = 1; 526 dev->raw_io_64 = 1;
448 dev->sync_mode = aac_sync_mode; 527 dev->sync_mode = aac_sync_mode;
449 if (dev->a_ops.adapter_comm && 528 if (dev->a_ops.adapter_comm &&
450 (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) { 529 (status[1] & AAC_OPT_NEW_COMM)) {
451 dev->comm_interface = AAC_COMM_MESSAGE; 530 dev->comm_interface = AAC_COMM_MESSAGE;
452 dev->raw_io_interface = 1; 531 dev->raw_io_interface = 1;
453 if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) { 532 if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) {
454 /* driver supports TYPE1 (Tupelo) */ 533 /* driver supports TYPE1 (Tupelo) */
455 dev->comm_interface = AAC_COMM_MESSAGE_TYPE1; 534 dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
456 } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) { 535 } else if (status[1] & AAC_OPT_NEW_COMM_TYPE2) {
457 /* driver supports TYPE2 (Denali) */ 536 /* driver supports TYPE2 (Denali, Yosemite) */
458 dev->comm_interface = AAC_COMM_MESSAGE_TYPE2; 537 dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
459 } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) || 538 } else if (status[1] & AAC_OPT_NEW_COMM_TYPE3) {
460 (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) { 539 /* driver supports TYPE3 (Yosemite, Thor) */
461 /* driver doesn't TYPE3 and TYPE4 */ 540 dev->comm_interface = AAC_COMM_MESSAGE_TYPE3;
462 /* switch to sync. mode */ 541 } else if (status[1] & AAC_OPT_NEW_COMM_TYPE4) {
542 /* not supported TYPE - switch to sync. mode */
463 dev->comm_interface = AAC_COMM_MESSAGE_TYPE2; 543 dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
464 dev->sync_mode = 1; 544 dev->sync_mode = 1;
465 } 545 }
466 } 546 }
547 if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
548 (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
549 dev->sa_firmware = 1;
550 else
551 dev->sa_firmware = 0;
552
467 if ((dev->comm_interface == AAC_COMM_MESSAGE) && 553 if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
468 (status[2] > dev->base_size)) { 554 (status[2] > dev->base_size)) {
469 aac_adapter_ioremap(dev, 0); 555 aac_adapter_ioremap(dev, 0);
@@ -500,61 +586,25 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
500 dev->sg_tablesize = status[2] & 0xFFFF; 586 dev->sg_tablesize = status[2] & 0xFFFF;
501 if (dev->pdev->device == PMC_DEVICE_S7 || 587 if (dev->pdev->device == PMC_DEVICE_S7 ||
502 dev->pdev->device == PMC_DEVICE_S8 || 588 dev->pdev->device == PMC_DEVICE_S8 ||
503 dev->pdev->device == PMC_DEVICE_S9) 589 dev->pdev->device == PMC_DEVICE_S9) {
504 host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) : 590 if (host->can_queue > (status[3] >> 16) -
505 (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB; 591 AAC_NUM_MGT_FIB)
506 else 592 host->can_queue = (status[3] >> 16) -
507 host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB; 593 AAC_NUM_MGT_FIB;
594 } else if (host->can_queue > (status[3] & 0xFFFF) -
595 AAC_NUM_MGT_FIB)
596 host->can_queue = (status[3] & 0xFFFF) -
597 AAC_NUM_MGT_FIB;
598
508 dev->max_num_aif = status[4] & 0xFFFF; 599 dev->max_num_aif = status[4] & 0xFFFF;
509 /*
510 * NOTE:
511 * All these overrides are based on a fixed internal
512 * knowledge and understanding of existing adapters,
513 * acbsize should be set with caution.
514 */
515 if (acbsize == 512) {
516 host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
517 dev->max_fib_size = 512;
518 dev->sg_tablesize = host->sg_tablesize
519 = (512 - sizeof(struct aac_fibhdr)
520 - sizeof(struct aac_write) + sizeof(struct sgentry))
521 / sizeof(struct sgentry);
522 host->can_queue = AAC_NUM_IO_FIB;
523 } else if (acbsize == 2048) {
524 host->max_sectors = 512;
525 dev->max_fib_size = 2048;
526 host->sg_tablesize = 65;
527 dev->sg_tablesize = 81;
528 host->can_queue = 512 - AAC_NUM_MGT_FIB;
529 } else if (acbsize == 4096) {
530 host->max_sectors = 1024;
531 dev->max_fib_size = 4096;
532 host->sg_tablesize = 129;
533 dev->sg_tablesize = 166;
534 host->can_queue = 256 - AAC_NUM_MGT_FIB;
535 } else if (acbsize == 8192) {
536 host->max_sectors = 2048;
537 dev->max_fib_size = 8192;
538 host->sg_tablesize = 257;
539 dev->sg_tablesize = 337;
540 host->can_queue = 128 - AAC_NUM_MGT_FIB;
541 } else if (acbsize > 0) {
542 printk("Illegal acbsize=%d ignored\n", acbsize);
543 }
544 } 600 }
545 { 601 if (numacb > 0) {
546 602 if (numacb < host->can_queue)
547 if (numacb > 0) { 603 host->can_queue = numacb;
548 if (numacb < host->can_queue) 604 else
549 host->can_queue = numacb; 605 pr_warn("numacb=%d ignored\n", numacb);
550 else
551 printk("numacb=%d ignored\n", numacb);
552 }
553 } 606 }
554 607
555 if (host->can_queue > AAC_NUM_IO_FIB)
556 host->can_queue = AAC_NUM_IO_FIB;
557
558 if (dev->pdev->device == PMC_DEVICE_S6 || 608 if (dev->pdev->device == PMC_DEVICE_S6 ||
559 dev->pdev->device == PMC_DEVICE_S7 || 609 dev->pdev->device == PMC_DEVICE_S7 ||
560 dev->pdev->device == PMC_DEVICE_S8 || 610 dev->pdev->device == PMC_DEVICE_S8 ||
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 9e7551fe4b19..969727b67cdd 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -6,7 +6,8 @@
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc. 8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -43,6 +44,7 @@
43#include <linux/kthread.h> 44#include <linux/kthread.h>
44#include <linux/interrupt.h> 45#include <linux/interrupt.h>
45#include <linux/semaphore.h> 46#include <linux/semaphore.h>
47#include <linux/bcd.h>
46#include <scsi/scsi.h> 48#include <scsi/scsi.h>
47#include <scsi/scsi_host.h> 49#include <scsi/scsi_host.h>
48#include <scsi/scsi_device.h> 50#include <scsi/scsi_device.h>
@@ -60,12 +62,22 @@
60 62
61static int fib_map_alloc(struct aac_dev *dev) 63static int fib_map_alloc(struct aac_dev *dev)
62{ 64{
65 if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
66 dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
67 else
68 dev->max_cmd_size = dev->max_fib_size;
74
63 dprintk((KERN_INFO 75 dprintk((KERN_INFO
64 "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n", 76 "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
65 dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue, 77 dev->pdev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
66 AAC_NUM_MGT_FIB, &dev->hw_fib_pa)); 78 AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
67 dev->hw_fib_va = pci_alloc_consistent(dev->pdev, 79 dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
68 (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) 80 (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
69 * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1), 81 * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
70 &dev->hw_fib_pa); 82 &dev->hw_fib_pa);
71 if (dev->hw_fib_va == NULL) 83 if (dev->hw_fib_va == NULL)
@@ -83,9 +95,9 @@ static int fib_map_alloc(struct aac_dev *dev)
83 95
84void aac_fib_map_free(struct aac_dev *dev) 96void aac_fib_map_free(struct aac_dev *dev)
85{ 97{
86 if (dev->hw_fib_va && dev->max_fib_size) { 98 if (dev->hw_fib_va && dev->max_cmd_size) {
87 pci_free_consistent(dev->pdev, 99 pci_free_consistent(dev->pdev,
88 (dev->max_fib_size * 100 (dev->max_cmd_size *
89 (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)), 101 (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
90 dev->hw_fib_va, dev->hw_fib_pa); 102 dev->hw_fib_va, dev->hw_fib_pa);
91 } 103 }
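Both hunks above re-key the coherent fib area on max_cmd_size, sized as one slot of (max_cmd_size + sizeof(struct aac_fib_xporthdr)) per fib for can_queue + AAC_NUM_MGT_FIB fibs, plus ALIGN32 - 1 bytes of slack so the base can be rounded up to a 32-byte boundary. A hedged sketch of the sizing and alignment math; the struct sizes and counts below are stand-ins, not the driver's real values:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN32 32

    /*
     * One slot per fib (command area plus transport header), for every
     * I/O fib and every management fib, plus alignment slack.
     */
    static size_t fib_area_size(size_t max_cmd_size, size_t xport_hdr,
                                unsigned int can_queue, unsigned int mgt_fibs)
    {
        return (max_cmd_size + xport_hdr) * (can_queue + mgt_fibs)
                + (ALIGN32 - 1);
    }

    /* Round a bus address up to the next 32-byte boundary, as the
     * driver does with hw_fib_pa after the allocation. */
    static uint64_t align32(uint64_t pa)
    {
        return (pa + ALIGN32 - 1) & ~(uint64_t)(ALIGN32 - 1);
    }

    int main(void)
    {
        printf("bytes: %zu\n", fib_area_size(2048, 32, 504, 8));
        printf("base:  %#llx\n", (unsigned long long)align32(0x1004));
        return 0;
    }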
@@ -129,11 +141,14 @@ int aac_fib_setup(struct aac_dev * dev)
129 struct hw_fib *hw_fib; 141 struct hw_fib *hw_fib;
130 dma_addr_t hw_fib_pa; 142 dma_addr_t hw_fib_pa;
131 int i; 143 int i;
144 u32 max_cmds;
132 145
133 while (((i = fib_map_alloc(dev)) == -ENOMEM) 146 while (((i = fib_map_alloc(dev)) == -ENOMEM)
134 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) { 147 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
135 dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1); 148 max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1;
136 dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB; 149 dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
150 if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
151 dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
137 } 152 }
138 if (i<0) 153 if (i<0)
139 return -ENOMEM; 154 return -ENOMEM;
@@ -144,7 +159,7 @@ int aac_fib_setup(struct aac_dev * dev)
144 (hw_fib_pa - dev->hw_fib_pa)); 159 (hw_fib_pa - dev->hw_fib_pa));
145 dev->hw_fib_pa = hw_fib_pa; 160 dev->hw_fib_pa = hw_fib_pa;
146 memset(dev->hw_fib_va, 0, 161 memset(dev->hw_fib_va, 0,
147 (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) * 162 (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
148 (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)); 163 (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
149 164
150 /* add Xport header */ 165 /* add Xport header */
@@ -170,12 +185,22 @@ int aac_fib_setup(struct aac_dev * dev)
170 sema_init(&fibptr->event_wait, 0); 185 sema_init(&fibptr->event_wait, 0);
171 spin_lock_init(&fibptr->event_lock); 186 spin_lock_init(&fibptr->event_lock);
172 hw_fib->header.XferState = cpu_to_le32(0xffffffff); 187 hw_fib->header.XferState = cpu_to_le32(0xffffffff);
173 hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); 188 hw_fib->header.SenderSize =
189 cpu_to_le16(dev->max_fib_size); /* ?? max_cmd_size */
174 fibptr->hw_fib_pa = hw_fib_pa; 190 fibptr->hw_fib_pa = hw_fib_pa;
191 fibptr->hw_sgl_pa = hw_fib_pa +
192 offsetof(struct aac_hba_cmd_req, sge[2]);
193 /*
194 * one element is for the ptr to the separate sg list,
195 * second element for 32 byte alignment
196 */
197 fibptr->hw_error_pa = hw_fib_pa +
198 offsetof(struct aac_native_hba, resp.resp_bytes[0]);
199
175 hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + 200 hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
176 dev->max_fib_size + sizeof(struct aac_fib_xporthdr)); 201 dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
177 hw_fib_pa = hw_fib_pa + 202 hw_fib_pa = hw_fib_pa +
178 dev->max_fib_size + sizeof(struct aac_fib_xporthdr); 203 dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
179 } 204 }
180 205
181 /* 206 /*
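In the aac_fib_setup loop above, each fib also records two bus addresses carved out of its own slot with offsetof(): hw_sgl_pa lands on the third SGE of the native command (the first two elements are reserved for the pointer to the separate SG list and for 32-byte alignment, per the in-line comment), and hw_error_pa lands on the response bytes. A sketch of the carving with simplified stand-in structs; the field layout below is illustrative, not the driver's aac_hba_cmd_req/aac_native_hba definitions:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the driver's native command layouts. */
    struct sge { uint64_t addr; uint32_t len; uint32_t flags; };

    struct hba_cmd_req {
        uint32_t iu_type;
        uint32_t request_id;
        struct sge sge[4];
    };

    struct native_hba {
        struct hba_cmd_req cmd;
        struct { uint8_t resp_bytes[128]; } resp;
    };

    int main(void)
    {
        uint64_t slot_pa = 0x100000;    /* bus address of this fib's slot */

        /* sge[0] and sge[1] are skipped, per the comment in the hunk */
        uint64_t sgl_pa = slot_pa + offsetof(struct hba_cmd_req, sge[2]);
        uint64_t err_pa = slot_pa +
                offsetof(struct native_hba, resp.resp_bytes[0]);

        printf("sgl at +%#llx, error at +%#llx\n",
               (unsigned long long)(sgl_pa - slot_pa),
               (unsigned long long)(err_pa - slot_pa));
        return 0;
    }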
@@ -273,7 +298,8 @@ void aac_fib_free(struct fib *fibptr)
273 spin_lock_irqsave(&fibptr->dev->fib_lock, flags); 298 spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
274 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) 299 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
275 aac_config.fib_timeouts++; 300 aac_config.fib_timeouts++;
276 if (fibptr->hw_fib_va->header.XferState != 0) { 301 if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
302 fibptr->hw_fib_va->header.XferState != 0) {
277 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 303 printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
278 (void*)fibptr, 304 (void*)fibptr,
279 le32_to_cpu(fibptr->hw_fib_va->header.XferState)); 305 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
@@ -501,8 +527,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
501 * Map the fib into 32bits by using the fib number 527 * Map the fib into 32bits by using the fib number
502 */ 528 */
503 529
504 hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2); 530 hw_fib->header.SenderFibAddress =
505 hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1; 531 cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
532
533 /* use the same shifted value for handle to be compatible
534 * with the new native hba command handle
535 */
536 hw_fib->header.Handle =
537 cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
538
506 /* 539 /*
507 * Set FIB state to indicate where it came from and if we want a 540 * Set FIB state to indicate where it came from and if we want a
508 * response from the adapter. Also load the command from the 541 * response from the adapter. Also load the command from the
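The Handle encoding above packs the fib index as (index << 2) + 1 so one value serves both the legacy FIB header and the native-HBA request_id: bit 0 is set, bit 1 stays zero as the firmware requires (per the comment in aac_hba_send below), and the index is recovered with a right shift by two. A small self-checking sketch of the round trip:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Encode a fib index into a handle: index << 2 keeps bit 1 clear (a
     * firmware requirement for native request_ids) and +1 sets bit 0.
     */
    static uint32_t fib_handle(uint32_t index)
    {
        return (index << 2) + 1;
    }

    static uint32_t fib_index(uint32_t handle)
    {
        return handle >> 2;
    }

    int main(void)
    {
        for (uint32_t i = 0; i < 512; i++) {
            uint32_t h = fib_handle(i);

            assert((h & 0x2) == 0);       /* bit 1 must stay 0 */
            assert(fib_index(h) == i);    /* index round-trips */
        }
        puts("all handles ok");
        return 0;
    }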
@@ -670,6 +703,82 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
670 return 0; 703 return 0;
671} 704}
672 705
706int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
707 void *callback_data)
708{
709 struct aac_dev *dev = fibptr->dev;
710 int wait;
711 unsigned long flags = 0;
712 unsigned long mflags = 0;
713
714 fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
715 if (callback) {
716 wait = 0;
717 fibptr->callback = callback;
718 fibptr->callback_data = callback_data;
719 } else
720 wait = 1;
721
722
723 if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
724 struct aac_hba_cmd_req *hbacmd =
725 (struct aac_hba_cmd_req *)fibptr->hw_fib_va;
726
727 hbacmd->iu_type = command;
728 /* bit1 of request_id must be 0 */
729 hbacmd->request_id =
730 cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
731 } else
732 return -EINVAL;
733
734
735 if (wait) {
736 spin_lock_irqsave(&dev->manage_lock, mflags);
737 if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
738 spin_unlock_irqrestore(&dev->manage_lock, mflags);
739 return -EBUSY;
740 }
741 dev->management_fib_count++;
742 spin_unlock_irqrestore(&dev->manage_lock, mflags);
743 spin_lock_irqsave(&fibptr->event_lock, flags);
744 }
745
746 if (aac_adapter_deliver(fibptr) != 0) {
747 if (wait) {
748 spin_unlock_irqrestore(&fibptr->event_lock, flags);
749 spin_lock_irqsave(&dev->manage_lock, mflags);
750 dev->management_fib_count--;
751 spin_unlock_irqrestore(&dev->manage_lock, mflags);
752 }
753 return -EBUSY;
754 }
755 FIB_COUNTER_INCREMENT(aac_config.NativeSent);
756
757 if (wait) {
758 spin_unlock_irqrestore(&fibptr->event_lock, flags);
 759 /* Only set for first known interruptible command */
760 if (down_interruptible(&fibptr->event_wait)) {
761 fibptr->done = 2;
762 up(&fibptr->event_wait);
763 }
764 spin_lock_irqsave(&fibptr->event_lock, flags);
765 if ((fibptr->done == 0) || (fibptr->done == 2)) {
766 fibptr->done = 2; /* Tell interrupt we aborted */
767 spin_unlock_irqrestore(&fibptr->event_lock, flags);
768 return -ERESTARTSYS;
769 }
770 spin_unlock_irqrestore(&fibptr->event_lock, flags);
771 WARN_ON(fibptr->done == 0);
772
773 if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
774 return -ETIMEDOUT;
775
776 return 0;
777 }
778
779 return -EINPROGRESS;
780}
781
673/** 782/**
674 * aac_consumer_get - get the top of the queue 783 * aac_consumer_get - get the top of the queue
675 * @dev: Adapter 784 * @dev: Adapter
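aac_hba_send's synchronous path treats fibptr->done as a three-state handshake with the interrupt side: 0 is pending, 1 is completed, and 2 means the waiter was interrupted and walked away, in which case the completion path (see the dpcsup.c changes below) must complete and free the fib itself. A single-threaded sketch of those transitions with the semaphore and locking elided; the enum names are invented for readability:

    #include <stdio.h>

    enum fib_done { FIB_PENDING = 0, FIB_COMPLETED = 1, FIB_ABORTED = 2 };

    struct mini_fib { enum fib_done done; };

    /* Waiter side: called when down_interruptible() is interrupted. */
    static void waiter_interrupted(struct mini_fib *f)
    {
        f->done = FIB_ABORTED;    /* tell the completion path we gave up */
    }

    /* Completion side: returns 1 if this path must also complete and
     * free the fib (the waiter already returned -ERESTARTSYS). */
    static int irq_complete(struct mini_fib *f)
    {
        if (f->done == FIB_ABORTED) {
            f->done = FIB_COMPLETED;
            return 1;             /* complete + free here */
        }
        f->done = FIB_COMPLETED;
        return 0;                 /* wake the waiter instead */
    }

    int main(void)
    {
        struct mini_fib f = { FIB_PENDING };

        waiter_interrupted(&f);
        printf("irq must free: %d\n", irq_complete(&f));    /* 1 */
        return 0;
    }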
@@ -761,7 +870,8 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
761 unsigned long qflags; 870 unsigned long qflags;
762 871
763 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 || 872 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
764 dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { 873 dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
874 dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
765 kfree(hw_fib); 875 kfree(hw_fib);
766 return 0; 876 return 0;
767 } 877 }
@@ -827,11 +937,17 @@ int aac_fib_complete(struct fib *fibptr)
827{ 937{
828 struct hw_fib * hw_fib = fibptr->hw_fib_va; 938 struct hw_fib * hw_fib = fibptr->hw_fib_va;
829 939
940 if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
941 fib_dealloc(fibptr);
942 return 0;
943 }
944
830 /* 945 /*
831 * Check for a fib which has already been completed 946 * Check for a fib which has already been completed or with a
947 * status wait timeout
832 */ 948 */
833 949
834 if (hw_fib->header.XferState == 0) 950 if (hw_fib->header.XferState == 0 || fibptr->done == 2)
835 return 0; 951 return 0;
836 /* 952 /*
837 * If we plan to do anything check the structure type first. 953 * If we plan to do anything check the structure type first.
@@ -984,20 +1100,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
984 lun = (container >> 16) & 0xFF; 1100 lun = (container >> 16) & 0xFF;
985 container = (u32)-1; 1101 container = (u32)-1;
986 channel = aac_phys_to_logical(channel); 1102 channel = aac_phys_to_logical(channel);
987 device_config_needed = 1103 device_config_needed = DELETE;
988 (((__le32 *)aifcmd->data)[0] ==
989 cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD;
990
991 if (device_config_needed == ADD) {
992 device = scsi_device_lookup(
993 dev->scsi_host_ptr,
994 channel, id, lun);
995 if (device) {
996 scsi_remove_device(device);
997 scsi_device_put(device);
998 }
999 }
1000 break; 1104 break;
1105
1001 /* 1106 /*
1002 * Morph or Expand complete 1107 * Morph or Expand complete
1003 */ 1108 */
@@ -1351,7 +1456,7 @@ retry_next:
1351 } 1456 }
1352} 1457}
1353 1458
1354static int _aac_reset_adapter(struct aac_dev *aac, int forced) 1459static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1355{ 1460{
1356 int index, quirks; 1461 int index, quirks;
1357 int retval; 1462 int retval;
@@ -1360,6 +1465,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1360 struct scsi_cmnd *command; 1465 struct scsi_cmnd *command;
1361 struct scsi_cmnd *command_list; 1466 struct scsi_cmnd *command_list;
1362 int jafo = 0; 1467 int jafo = 0;
1468 int bled;
1363 1469
1364 /* 1470 /*
1365 * Assumptions: 1471 * Assumptions:
@@ -1384,7 +1490,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1384 * If a positive health, means in a known DEAD PANIC 1490 * If a positive health, means in a known DEAD PANIC
1385 * state and the adapter could be reset to `try again'. 1491 * state and the adapter could be reset to `try again'.
1386 */ 1492 */
1387 retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac)); 1493 bled = forced ? 0 : aac_adapter_check_health(aac);
1494 retval = aac_adapter_restart(aac, bled, reset_type);
1388 1495
1389 if (retval) 1496 if (retval)
1390 goto out; 1497 goto out;
@@ -1494,11 +1601,12 @@ out:
1494 return retval; 1601 return retval;
1495} 1602}
1496 1603
1497int aac_reset_adapter(struct aac_dev * aac, int forced) 1604int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1498{ 1605{
1499 unsigned long flagv = 0; 1606 unsigned long flagv = 0;
1500 int retval; 1607 int retval;
1501 struct Scsi_Host * host; 1608 struct Scsi_Host * host;
1609 int bled;
1502 1610
1503 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) 1611 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1504 return -EBUSY; 1612 return -EBUSY;
@@ -1547,7 +1655,9 @@ int aac_reset_adapter(struct aac_dev * aac, int forced)
1547 if (forced < 2) 1655 if (forced < 2)
1548 aac_send_shutdown(aac); 1656 aac_send_shutdown(aac);
1549 spin_lock_irqsave(host->host_lock, flagv); 1657 spin_lock_irqsave(host->host_lock, flagv);
1550 retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1))); 1658 bled = forced ? forced :
1659 (aac_check_reset != 0 && aac_check_reset != 1);
1660 retval = _aac_reset_adapter(aac, bled, reset_type);
1551 spin_unlock_irqrestore(host->host_lock, flagv); 1661 spin_unlock_irqrestore(host->host_lock, flagv);
1552 1662
1553 if ((forced < 2) && (retval == -ENODEV)) { 1663 if ((forced < 2) && (retval == -ENODEV)) {
@@ -1593,6 +1703,7 @@ int aac_check_health(struct aac_dev * aac)
1593 unsigned long time_now, flagv = 0; 1703 unsigned long time_now, flagv = 0;
1594 struct list_head * entry; 1704 struct list_head * entry;
1595 struct Scsi_Host * host; 1705 struct Scsi_Host * host;
1706 int bled;
1596 1707
1597 /* Extending the scope of fib_lock slightly to protect aac->in_reset */ 1708 /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1598 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) 1709 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
@@ -1710,7 +1821,8 @@ int aac_check_health(struct aac_dev * aac)
1710 host = aac->scsi_host_ptr; 1821 host = aac->scsi_host_ptr;
1711 if (aac->thread->pid != current->pid) 1822 if (aac->thread->pid != current->pid)
1712 spin_lock_irqsave(host->host_lock, flagv); 1823 spin_lock_irqsave(host->host_lock, flagv);
1713 BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1); 1824 bled = aac_check_reset != 1 ? 1 : 0;
1825 _aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
1714 if (aac->thread->pid != current->pid) 1826 if (aac->thread->pid != current->pid)
1715 spin_unlock_irqrestore(host->host_lock, flagv); 1827 spin_unlock_irqrestore(host->host_lock, flagv);
1716 return BlinkLED; 1828 return BlinkLED;
@@ -1721,6 +1833,552 @@ out:
1721} 1833}
1722 1834
1723 1835
1836static void aac_resolve_luns(struct aac_dev *dev)
1837{
1838 int bus, target, channel;
1839 struct scsi_device *sdev;
1840 u8 devtype;
1841 u8 new_devtype;
1842
1843 for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
1844 for (target = 0; target < AAC_MAX_TARGETS; target++) {
1845
1846 if (aac_phys_to_logical(bus) == ENCLOSURE_CHANNEL)
1847 continue;
1848
1849 if (bus == CONTAINER_CHANNEL)
1850 channel = CONTAINER_CHANNEL;
1851 else
1852 channel = aac_phys_to_logical(bus);
1853
1854 devtype = dev->hba_map[bus][target].devtype;
1855 new_devtype = dev->hba_map[bus][target].new_devtype;
1856
1857 sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
1858 target, 0);
1859
1860 if (!sdev && devtype)
1861 scsi_add_device(dev->scsi_host_ptr, channel,
1862 target, 0);
1863 else if (sdev && new_devtype != devtype)
1864 scsi_remove_device(sdev);
1865 else if (sdev && new_devtype == devtype)
1866 scsi_rescan_device(&sdev->sdev_gendev);
1867
1868 if (sdev)
1869 scsi_device_put(sdev);
1870
1871 dev->hba_map[bus][target].devtype = new_devtype;
1872 }
1873 }
1874}
1875
1876/**
 1877 * aac_handle_sa_aif - Handle a message from the firmware
1878 * @dev: Which adapter this fib is from
1879 * @fibptr: Pointer to fibptr from adapter
1880 *
1881 * This routine handles a driver notify fib from the adapter and
1882 * dispatches it to the appropriate routine for handling.
1883 */
1884static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
1885{
1886 int i, bus, target, container, rcode = 0;
1887 u32 events = 0;
1888 struct fib *fib;
1889 struct scsi_device *sdev;
1890
1891 if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
1892 events = SA_AIF_HOTPLUG;
1893 else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
1894 events = SA_AIF_HARDWARE;
1895 else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
1896 events = SA_AIF_PDEV_CHANGE;
1897 else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
1898 events = SA_AIF_LDEV_CHANGE;
1899 else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
1900 events = SA_AIF_BPSTAT_CHANGE;
1901 else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
1902 events = SA_AIF_BPCFG_CHANGE;
1903
1904 switch (events) {
1905 case SA_AIF_HOTPLUG:
1906 case SA_AIF_HARDWARE:
1907 case SA_AIF_PDEV_CHANGE:
1908 case SA_AIF_LDEV_CHANGE:
1909 case SA_AIF_BPCFG_CHANGE:
1910
1911 fib = aac_fib_alloc(dev);
1912 if (!fib) {
1913 pr_err("aac_handle_sa_aif: out of memory\n");
1914 return;
1915 }
1916 for (bus = 0; bus < AAC_MAX_BUSES; bus++)
1917 for (target = 0; target < AAC_MAX_TARGETS; target++)
1918 dev->hba_map[bus][target].new_devtype = 0;
1919
1920 rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);
1921
1922 if (rcode != -ERESTARTSYS)
1923 aac_fib_free(fib);
1924
1925 aac_resolve_luns(dev);
1926
1927 if (events == SA_AIF_LDEV_CHANGE ||
1928 events == SA_AIF_BPCFG_CHANGE) {
1929 aac_get_containers(dev);
1930 for (container = 0; container <
1931 dev->maximum_num_containers; ++container) {
1932 sdev = scsi_device_lookup(dev->scsi_host_ptr,
1933 CONTAINER_CHANNEL,
1934 container, 0);
1935 if (dev->fsa_dev[container].valid && !sdev) {
1936 scsi_add_device(dev->scsi_host_ptr,
1937 CONTAINER_CHANNEL,
1938 container, 0);
1939 } else if (!dev->fsa_dev[container].valid &&
1940 sdev) {
1941 scsi_remove_device(sdev);
1942 scsi_device_put(sdev);
1943 } else if (sdev) {
1944 scsi_rescan_device(&sdev->sdev_gendev);
1945 scsi_device_put(sdev);
1946 }
1947 }
1948 }
1949 break;
1950
1951 case SA_AIF_BPSTAT_CHANGE:
1952 /* currently do nothing */
1953 break;
1954 }
1955
1956 for (i = 1; i <= 10; ++i) {
1957 events = src_readl(dev, MUnit.IDR);
1958 if (events & (1<<23)) {
 1959 pr_warn(" AIF not cleared by firmware - %d/%d\n",
1960 i, 10);
1961 ssleep(1);
1962 }
1963 }
1964}
1965
1966static int get_fib_count(struct aac_dev *dev)
1967{
1968 unsigned int num = 0;
1969 struct list_head *entry;
1970 unsigned long flagv;
1971
1972 /*
1973 * Warning: no sleep allowed while
1974 * holding spinlock. We take the estimate
1975 * and pre-allocate a set of fibs outside the
1976 * lock.
1977 */
1978 num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
1979 / sizeof(struct hw_fib); /* some extra */
1980 spin_lock_irqsave(&dev->fib_lock, flagv);
1981 entry = dev->fib_list.next;
1982 while (entry != &dev->fib_list) {
1983 entry = entry->next;
1984 ++num;
1985 }
1986 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1987
1988 return num;
1989}
1990
1991static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
1992 struct fib **fib_pool,
1993 unsigned int num)
1994{
1995 struct hw_fib **hw_fib_p;
1996 struct fib **fib_p;
1997 int rcode = 1;
1998
1999 hw_fib_p = hw_fib_pool;
2000 fib_p = fib_pool;
2001 while (hw_fib_p < &hw_fib_pool[num]) {
2002 *(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
2003 if (!(*(hw_fib_p++))) {
2004 --hw_fib_p;
2005 break;
2006 }
2007
2008 *(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
2009 if (!(*(fib_p++))) {
2010 kfree(*(--hw_fib_p));
2011 break;
2012 }
2013 }
2014
2015 num = hw_fib_p - hw_fib_pool;
2016 if (!num)
2017 rcode = 0;
2018
2019 return rcode;
2020}
2021
2022static void wakeup_fibctx_threads(struct aac_dev *dev,
2023 struct hw_fib **hw_fib_pool,
2024 struct fib **fib_pool,
2025 struct fib *fib,
2026 struct hw_fib *hw_fib,
2027 unsigned int num)
2028{
2029 unsigned long flagv;
2030 struct list_head *entry;
2031 struct hw_fib **hw_fib_p;
2032 struct fib **fib_p;
2033 u32 time_now, time_last;
2034 struct hw_fib *hw_newfib;
2035 struct fib *newfib;
2036 struct aac_fib_context *fibctx;
2037
2038 time_now = jiffies/HZ;
2039 spin_lock_irqsave(&dev->fib_lock, flagv);
2040 entry = dev->fib_list.next;
2041 /*
2042 * For each Context that is on the
2043 * fibctxList, make a copy of the
2044 * fib, and then set the event to wake up the
2045 * thread that is waiting for it.
2046 */
2047
2048 hw_fib_p = hw_fib_pool;
2049 fib_p = fib_pool;
2050 while (entry != &dev->fib_list) {
2051 /*
2052 * Extract the fibctx
2053 */
2054 fibctx = list_entry(entry, struct aac_fib_context,
2055 next);
2056 /*
2057 * Check if the queue is getting
2058 * backlogged
2059 */
2060 if (fibctx->count > 20) {
2061 /*
2062 * It's *not* jiffies folks,
2063 * but jiffies / HZ so do not
2064 * panic ...
2065 */
2066 time_last = fibctx->jiffies;
2067 /*
2068 * Has it been > 2 minutes
2069 * since the last read off
2070 * the queue?
2071 */
2072 if ((time_now - time_last) > aif_timeout) {
2073 entry = entry->next;
2074 aac_close_fib_context(dev, fibctx);
2075 continue;
2076 }
2077 }
2078 /*
2079 * Warning: no sleep allowed while
2080 * holding spinlock
2081 */
2082 if (hw_fib_p >= &hw_fib_pool[num]) {
2083 pr_warn("aifd: didn't allocate NewFib\n");
2084 entry = entry->next;
2085 continue;
2086 }
2087
2088 hw_newfib = *hw_fib_p;
2089 *(hw_fib_p++) = NULL;
2090 newfib = *fib_p;
2091 *(fib_p++) = NULL;
2092 /*
2093 * Make the copy of the FIB
2094 */
2095 memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
2096 memcpy(newfib, fib, sizeof(struct fib));
2097 newfib->hw_fib_va = hw_newfib;
2098 /*
2099 * Put the FIB onto the
2100 * fibctx's fibs
2101 */
2102 list_add_tail(&newfib->fiblink, &fibctx->fib_list);
2103 fibctx->count++;
2104 /*
2105 * Set the event to wake up the
2106 * thread that is waiting.
2107 */
2108 up(&fibctx->wait_sem);
2109
2110 entry = entry->next;
2111 }
2112 /*
2113 * Set the status of this FIB
2114 */
2115 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
2116 aac_fib_adapter_complete(fib, sizeof(u32));
2117 spin_unlock_irqrestore(&dev->fib_lock, flagv);
2118
2119}
2120
2121static void aac_process_events(struct aac_dev *dev)
2122{
2123 struct hw_fib *hw_fib;
2124 struct fib *fib;
2125 unsigned long flags;
2126 spinlock_t *t_lock;
2127 unsigned int rcode;
2128
2129 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2130 spin_lock_irqsave(t_lock, flags);
2131
2132 while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
2133 struct list_head *entry;
2134 struct aac_aifcmd *aifcmd;
2135 unsigned int num;
2136 struct hw_fib **hw_fib_pool, **hw_fib_p;
2137 struct fib **fib_pool, **fib_p;
2138
2139 set_current_state(TASK_RUNNING);
2140
2141 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
2142 list_del(entry);
2143
2144 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2145 spin_unlock_irqrestore(t_lock, flags);
2146
2147 fib = list_entry(entry, struct fib, fiblink);
2148 hw_fib = fib->hw_fib_va;
2149 if (dev->sa_firmware) {
2150 /* Thor AIF */
2151 aac_handle_sa_aif(dev, fib);
2152 aac_fib_adapter_complete(fib, (u16)sizeof(u32));
2153 continue;
2154 }
2155 /*
2156 * We will process the FIB here or pass it to a
2157 * worker thread that is TBD. We Really can't
2158 * do anything at this point since we don't have
2159 * anything defined for this thread to do.
2160 */
2161 memset(fib, 0, sizeof(struct fib));
2162 fib->type = FSAFS_NTC_FIB_CONTEXT;
2163 fib->size = sizeof(struct fib);
2164 fib->hw_fib_va = hw_fib;
2165 fib->data = hw_fib->data;
2166 fib->dev = dev;
2167 /*
2168 * We only handle AifRequest fibs from the adapter.
2169 */
2170
2171 aifcmd = (struct aac_aifcmd *) hw_fib->data;
2172 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
2173 /* Handle Driver Notify Events */
2174 aac_handle_aif(dev, fib);
2175 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
2176 aac_fib_adapter_complete(fib, (u16)sizeof(u32));
2177 goto free_fib;
2178 }
2179 /*
2180 * The u32 here is important and intended. We are using
2181 * 32bit wrapping time to fit the adapter field
2182 */
2183
2184 /* Sniff events */
2185 if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
2186 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
2187 aac_handle_aif(dev, fib);
2188 }
2189
2190 /*
2191 * get number of fibs to process
2192 */
2193 num = get_fib_count(dev);
2194 if (!num)
2195 goto free_fib;
2196
2197 hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
2198 GFP_KERNEL);
2199 if (!hw_fib_pool)
2200 goto free_fib;
2201
2202 fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
2203 if (!fib_pool)
2204 goto free_hw_fib_pool;
2205
2206 /*
2207 * Fill up fib pointer pools with actual fibs
2208 * and hw_fibs
2209 */
2210 rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num);
2211 if (!rcode)
2212 goto free_mem;
2213
2214 /*
2215 * wakeup the thread that is waiting for
2216 * the response from fw (ioctl)
2217 */
2218 wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
2219 fib, hw_fib, num);
2220
2221free_mem:
2222 /* Free up the remaining resources */
2223 hw_fib_p = hw_fib_pool;
2224 fib_p = fib_pool;
2225 while (hw_fib_p < &hw_fib_pool[num]) {
2226 kfree(*hw_fib_p);
2227 kfree(*fib_p);
2228 ++fib_p;
2229 ++hw_fib_p;
2230 }
2231 kfree(fib_pool);
2232free_hw_fib_pool:
2233 kfree(hw_fib_pool);
2234free_fib:
2235 kfree(fib);
2236 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2237 spin_lock_irqsave(t_lock, flags);
2238 }
2239 /*
2240 * There are no more AIF's
2241 */
2242 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2243 spin_unlock_irqrestore(t_lock, flags);
2244}
2245
2246static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
2247 u32 datasize)
2248{
2249 struct aac_srb *srbcmd;
2250 struct sgmap64 *sg64;
2251 dma_addr_t addr;
2252 char *dma_buf;
2253 struct fib *fibptr;
2254 int ret = -ENOMEM;
2255 u32 vbus, vid;
2256
2257 fibptr = aac_fib_alloc(dev);
2258 if (!fibptr)
2259 goto out;
2260
2261 dma_buf = pci_alloc_consistent(dev->pdev, datasize, &addr);
2262 if (!dma_buf)
2263 goto fib_free_out;
2264
2265 aac_fib_init(fibptr);
2266
2267 vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
2268 vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);
2269
2270 srbcmd = (struct aac_srb *)fib_data(fibptr);
2271
2272 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
2273 srbcmd->channel = cpu_to_le32(vbus);
2274 srbcmd->id = cpu_to_le32(vid);
2275 srbcmd->lun = 0;
2276 srbcmd->flags = cpu_to_le32(SRB_DataOut);
2277 srbcmd->timeout = cpu_to_le32(10);
2278 srbcmd->retry_limit = 0;
2279 srbcmd->cdb_size = cpu_to_le32(12);
2280 srbcmd->count = cpu_to_le32(datasize);
2281
2282 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2283 srbcmd->cdb[0] = BMIC_OUT;
2284 srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
2285 memcpy(dma_buf, (char *)wellness_str, datasize);
2286
2287 sg64 = (struct sgmap64 *)&srbcmd->sg;
2288 sg64->count = cpu_to_le32(1);
2289 sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
2290 sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2291 sg64->sg[0].count = cpu_to_le32(datasize);
2292
2293 ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
2294 FsaNormal, 1, 1, NULL, NULL);
2295
2296 pci_free_consistent(dev->pdev, datasize, (void *)dma_buf, addr);
2297
2298 /*
 2299 * Do not set XferState to zero unless
 2300 * we receive a response from the F/W
2301 */
2302 if (ret >= 0)
2303 aac_fib_complete(fibptr);
2304
2305 /*
2306 * FIB should be freed only after
2307 * getting the response from the F/W
2308 */
2309 if (ret != -ERESTARTSYS)
2310 goto fib_free_out;
2311
2312out:
2313 return ret;
2314fib_free_out:
2315 aac_fib_free(fibptr);
2316 goto out;
2317}
2318
2319int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now)
2320{
2321 struct tm cur_tm;
2322 char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
2323 u32 datasize = sizeof(wellness_str);
2324 unsigned long local_time;
2325 int ret = -ENODEV;
2326
2327 if (!dev->sa_firmware)
2328 goto out;
2329
2330 local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60));
2331 time_to_tm(local_time, 0, &cur_tm);
2332 cur_tm.tm_mon += 1;
2333 cur_tm.tm_year += 1900;
2334 wellness_str[8] = bin2bcd(cur_tm.tm_hour);
2335 wellness_str[9] = bin2bcd(cur_tm.tm_min);
2336 wellness_str[10] = bin2bcd(cur_tm.tm_sec);
2337 wellness_str[12] = bin2bcd(cur_tm.tm_mon);
2338 wellness_str[13] = bin2bcd(cur_tm.tm_mday);
2339 wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
2340 wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);
2341
2342 ret = aac_send_wellness_command(dev, wellness_str, datasize);
2343
2344out:
2345 return ret;
2346}
2347
2348int aac_send_hosttime(struct aac_dev *dev, struct timeval *now)
2349{
2350 int ret = -ENOMEM;
2351 struct fib *fibptr;
2352 __le32 *info;
2353
2354 fibptr = aac_fib_alloc(dev);
2355 if (!fibptr)
2356 goto out;
2357
2358 aac_fib_init(fibptr);
2359 info = (__le32 *)fib_data(fibptr);
2360 *info = cpu_to_le32(now->tv_sec);
2361 ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
2362 1, 1, NULL, NULL);
2363
2364 /*
2365 * Do not set XferState to zero unless
 2366 * we receive a response from the F/W
2367 */
2368 if (ret >= 0)
2369 aac_fib_complete(fibptr);
2370
2371 /*
2372 * FIB should be freed only after
2373 * getting the response from the F/W
2374 */
2375 if (ret != -ERESTARTSYS)
2376 aac_fib_free(fibptr);
2377
2378out:
2379 return ret;
2380}
2381
1724/** 2382/**
1725 * aac_command_thread - command processing thread 2383 * aac_command_thread - command processing thread
1726 * @dev: Adapter to monitor 2384 * @dev: Adapter to monitor
@@ -1734,10 +2392,6 @@ out:
1734int aac_command_thread(void *data) 2392int aac_command_thread(void *data)
1735{ 2393{
1736 struct aac_dev *dev = data; 2394 struct aac_dev *dev = data;
1737 struct hw_fib *hw_fib, *hw_newfib;
1738 struct fib *fib, *newfib;
1739 struct aac_fib_context *fibctx;
1740 unsigned long flags;
1741 DECLARE_WAITQUEUE(wait, current); 2395 DECLARE_WAITQUEUE(wait, current);
1742 unsigned long next_jiffies = jiffies + HZ; 2396 unsigned long next_jiffies = jiffies + HZ;
1743 unsigned long next_check_jiffies = next_jiffies; 2397 unsigned long next_check_jiffies = next_jiffies;
@@ -1757,196 +2411,8 @@ int aac_command_thread(void *data)
1757 set_current_state(TASK_INTERRUPTIBLE); 2411 set_current_state(TASK_INTERRUPTIBLE);
1758 dprintk ((KERN_INFO "aac_command_thread start\n")); 2412 dprintk ((KERN_INFO "aac_command_thread start\n"));
1759 while (1) { 2413 while (1) {
1760 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1761 while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
1762 struct list_head *entry;
1763 struct aac_aifcmd * aifcmd;
1764
1765 set_current_state(TASK_RUNNING);
1766 2414
1767 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; 2415 aac_process_events(dev);
1768 list_del(entry);
1769
1770 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1771 fib = list_entry(entry, struct fib, fiblink);
1772 /*
1773 * We will process the FIB here or pass it to a
1774 * worker thread that is TBD. We Really can't
1775 * do anything at this point since we don't have
1776 * anything defined for this thread to do.
1777 */
1778 hw_fib = fib->hw_fib_va;
1779 memset(fib, 0, sizeof(struct fib));
1780 fib->type = FSAFS_NTC_FIB_CONTEXT;
1781 fib->size = sizeof(struct fib);
1782 fib->hw_fib_va = hw_fib;
1783 fib->data = hw_fib->data;
1784 fib->dev = dev;
1785 /*
1786 * We only handle AifRequest fibs from the adapter.
1787 */
1788 aifcmd = (struct aac_aifcmd *) hw_fib->data;
1789 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
1790 /* Handle Driver Notify Events */
1791 aac_handle_aif(dev, fib);
1792 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1793 aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1794 } else {
1795 /* The u32 here is important and intended. We are using
1796 32bit wrapping time to fit the adapter field */
1797
1798 u32 time_now, time_last;
1799 unsigned long flagv;
1800 unsigned num;
1801 struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1802 struct fib ** fib_pool, ** fib_p;
1803
1804 /* Sniff events */
1805 if ((aifcmd->command ==
1806 cpu_to_le32(AifCmdEventNotify)) ||
1807 (aifcmd->command ==
1808 cpu_to_le32(AifCmdJobProgress))) {
1809 aac_handle_aif(dev, fib);
1810 }
1811
1812 time_now = jiffies/HZ;
1813
1814 /*
1815 * Warning: no sleep allowed while
1816 * holding spinlock. We take the estimate
1817 * and pre-allocate a set of fibs outside the
1818 * lock.
1819 */
1820 num = le32_to_cpu(dev->init->AdapterFibsSize)
1821 / sizeof(struct hw_fib); /* some extra */
1822 spin_lock_irqsave(&dev->fib_lock, flagv);
1823 entry = dev->fib_list.next;
1824 while (entry != &dev->fib_list) {
1825 entry = entry->next;
1826 ++num;
1827 }
1828 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1829 hw_fib_pool = NULL;
1830 fib_pool = NULL;
1831 if (num
1832 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1833 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1834 hw_fib_p = hw_fib_pool;
1835 fib_p = fib_pool;
1836 while (hw_fib_p < &hw_fib_pool[num]) {
1837 if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1838 --hw_fib_p;
1839 break;
1840 }
1841 if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1842 kfree(*(--hw_fib_p));
1843 break;
1844 }
1845 }
1846 if ((num = hw_fib_p - hw_fib_pool) == 0) {
1847 kfree(fib_pool);
1848 fib_pool = NULL;
1849 kfree(hw_fib_pool);
1850 hw_fib_pool = NULL;
1851 }
1852 } else {
1853 kfree(hw_fib_pool);
1854 hw_fib_pool = NULL;
1855 }
1856 spin_lock_irqsave(&dev->fib_lock, flagv);
1857 entry = dev->fib_list.next;
1858 /*
1859 * For each Context that is on the
1860 * fibctxList, make a copy of the
1861 * fib, and then set the event to wake up the
1862 * thread that is waiting for it.
1863 */
1864 hw_fib_p = hw_fib_pool;
1865 fib_p = fib_pool;
1866 while (entry != &dev->fib_list) {
1867 /*
1868 * Extract the fibctx
1869 */
1870 fibctx = list_entry(entry, struct aac_fib_context, next);
1871 /*
1872 * Check if the queue is getting
1873 * backlogged
1874 */
1875 if (fibctx->count > 20)
1876 {
1877 /*
1878 * It's *not* jiffies folks,
1879 * but jiffies / HZ so do not
1880 * panic ...
1881 */
1882 time_last = fibctx->jiffies;
1883 /*
1884 * Has it been > 2 minutes
1885 * since the last read off
1886 * the queue?
1887 */
1888 if ((time_now - time_last) > aif_timeout) {
1889 entry = entry->next;
1890 aac_close_fib_context(dev, fibctx);
1891 continue;
1892 }
1893 }
1894 /*
1895 * Warning: no sleep allowed while
1896 * holding spinlock
1897 */
1898 if (hw_fib_p < &hw_fib_pool[num]) {
1899 hw_newfib = *hw_fib_p;
1900 *(hw_fib_p++) = NULL;
1901 newfib = *fib_p;
1902 *(fib_p++) = NULL;
1903 /*
1904 * Make the copy of the FIB
1905 */
1906 memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1907 memcpy(newfib, fib, sizeof(struct fib));
1908 newfib->hw_fib_va = hw_newfib;
1909 /*
1910 * Put the FIB onto the
1911 * fibctx's fibs
1912 */
1913 list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1914 fibctx->count++;
1915 /*
1916 * Set the event to wake up the
1917 * thread that is waiting.
1918 */
1919 up(&fibctx->wait_sem);
1920 } else {
1921 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1922 }
1923 entry = entry->next;
1924 }
1925 /*
1926 * Set the status of this FIB
1927 */
1928 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1929 aac_fib_adapter_complete(fib, sizeof(u32));
1930 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1931 /* Free up the remaining resources */
1932 hw_fib_p = hw_fib_pool;
1933 fib_p = fib_pool;
1934 while (hw_fib_p < &hw_fib_pool[num]) {
1935 kfree(*hw_fib_p);
1936 kfree(*fib_p);
1937 ++fib_p;
1938 ++hw_fib_p;
1939 }
1940 kfree(hw_fib_pool);
1941 kfree(fib_pool);
1942 }
1943 kfree(fib);
1944 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1945 }
1946 /*
1947 * There are no more AIF's
1948 */
1949 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1950 2416
1951 /* 2417 /*
1952 * Background activity 2418 * Background activity
@@ -1968,7 +2434,7 @@ int aac_command_thread(void *data)
1968 2434
1969 /* Don't even try to talk to adapter if its sick */ 2435 /* Don't even try to talk to adapter if its sick */
1970 ret = aac_check_health(dev); 2436 ret = aac_check_health(dev);
1971 if (!ret && !dev->queues) 2437 if (!dev->queues)
1972 break; 2438 break;
1973 next_check_jiffies = jiffies 2439 next_check_jiffies = jiffies
1974 + ((long)(unsigned)check_interval) 2440 + ((long)(unsigned)check_interval)
@@ -1981,36 +2447,16 @@ int aac_command_thread(void *data)
1981 difference = (((1000000 - now.tv_usec) * HZ) 2447 difference = (((1000000 - now.tv_usec) * HZ)
1982 + 500000) / 1000000; 2448 + 500000) / 1000000;
1983 else if (ret == 0) { 2449 else if (ret == 0) {
1984 struct fib *fibptr; 2450
1985 2451 if (now.tv_usec > 500000)
1986 if ((fibptr = aac_fib_alloc(dev))) { 2452 ++now.tv_sec;
1987 int status; 2453
1988 __le32 *info; 2454 if (dev->sa_firmware)
1989 2455 ret =
1990 aac_fib_init(fibptr); 2456 aac_send_safw_hostttime(dev, &now);
1991 2457 else
1992 info = (__le32 *) fib_data(fibptr); 2458 ret = aac_send_hosttime(dev, &now);
1993 if (now.tv_usec > 500000) 2459
1994 ++now.tv_sec;
1995
1996 *info = cpu_to_le32(now.tv_sec);
1997
1998 status = aac_fib_send(SendHostTime,
1999 fibptr,
2000 sizeof(*info),
2001 FsaNormal,
2002 1, 1,
2003 NULL,
2004 NULL);
2005 /* Do not set XferState to zero unless
2006 * receives a response from F/W */
2007 if (status >= 0)
2008 aac_fib_complete(fibptr);
2009 /* FIB should be freed only after
2010 * getting the response from the F/W */
2011 if (status != -ERESTARTSYS)
2012 aac_fib_free(fibptr);
2013 }
2014 difference = (long)(unsigned)update_interval*HZ; 2460 difference = (long)(unsigned)update_interval*HZ;
2015 } else { 2461 } else {
2016 /* retry shortly */ 2462 /* retry shortly */
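aac_send_safw_hostttime, added above, packs the host clock into the fixed "<HW>TD...DW..ZZ" wellness template as BCD bytes: hour, minute and second at offsets 8-10, month and day at 12-13, and the year split into century and two-digit year at 14-15, leaving offset 11 untouched. A standalone sketch of the packing with a local equivalent of the kernel's bin2bcd():

    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    /* Same conversion as the kernel's bin2bcd(): 0..99 -> packed BCD. */
    static unsigned char bin2bcd(unsigned int val)
    {
        return (unsigned char)(((val / 10) << 4) | (val % 10));
    }

    int main(void)
    {
        char wellness[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
        time_t t = time(NULL);
        struct tm tm = *localtime(&t);

        tm.tm_mon += 1;        /* 1-based month, as the driver does */
        tm.tm_year += 1900;

        wellness[8]  = bin2bcd(tm.tm_hour);
        wellness[9]  = bin2bcd(tm.tm_min);
        wellness[10] = bin2bcd(tm.tm_sec);
        wellness[12] = bin2bcd(tm.tm_mon);
        wellness[13] = bin2bcd(tm.tm_mday);
        wellness[14] = bin2bcd(tm.tm_year / 100);
        wellness[15] = bin2bcd(tm.tm_year % 100);

        for (size_t i = 0; i < sizeof(wellness) - 1; i++)
            printf("%02x ", (unsigned char)wellness[i]);
        printf("\n");
        return 0;
    }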
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 7e836205aef1..417ba349e10e 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -6,7 +6,8 @@
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc. 8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -122,7 +123,6 @@ unsigned int aac_response_normal(struct aac_queue * q)
122 * NOTE: we cannot touch the fib after this 123 * NOTE: we cannot touch the fib after this
123 * call, because it may have been deallocated. 124 * call, because it may have been deallocated.
124 */ 125 */
125 fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
126 fib->callback(fib->callback_data, fib); 126 fib->callback(fib->callback_data, fib);
127 } else { 127 } else {
128 unsigned long flagv; 128 unsigned long flagv;
@@ -251,8 +251,9 @@ static void aac_aif_callback(void *context, struct fib * fibptr)
251 BUG_ON(fibptr == NULL); 251 BUG_ON(fibptr == NULL);
252 dev = fibptr->dev; 252 dev = fibptr->dev;
253 253
254 if (fibptr->hw_fib_va->header.XferState & 254 if ((fibptr->hw_fib_va->header.XferState &
255 cpu_to_le32(NoMoreAifDataAvailable)) { 255 cpu_to_le32(NoMoreAifDataAvailable)) ||
256 dev->sa_firmware) {
256 aac_fib_complete(fibptr); 257 aac_fib_complete(fibptr);
257 aac_fib_free(fibptr); 258 aac_fib_free(fibptr);
258 return; 259 return;
@@ -282,8 +283,8 @@ static void aac_aif_callback(void *context, struct fib * fibptr)
282 * know there is a response on our normal priority queue. We will pull off 283 * know there is a response on our normal priority queue. We will pull off
283 * all QE there are and wake up all the waiters before exiting. 284 * all QE there are and wake up all the waiters before exiting.
284 */ 285 */
285unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, 286unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
286 int isAif, int isFastResponse, struct hw_fib *aif_fib) 287 int isFastResponse, struct hw_fib *aif_fib)
287{ 288{
288 unsigned long mflags; 289 unsigned long mflags;
289 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index)); 290 dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
@@ -305,12 +306,14 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
305 kfree (fib); 306 kfree (fib);
306 return 1; 307 return 1;
307 } 308 }
308 if (aif_fib != NULL) { 309 if (dev->sa_firmware) {
310 fib->hbacmd_size = index; /* store event type */
311 } else if (aif_fib != NULL) {
309 memcpy(hw_fib, aif_fib, sizeof(struct hw_fib)); 312 memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
310 } else { 313 } else {
311 memcpy(hw_fib, 314 memcpy(hw_fib, (struct hw_fib *)
312 (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + 315 (((uintptr_t)(dev->regs.sa)) + index),
313 index), sizeof(struct hw_fib)); 316 sizeof(struct hw_fib));
314 } 317 }
315 INIT_LIST_HEAD(&fib->fiblink); 318 INIT_LIST_HEAD(&fib->fiblink);
316 fib->type = FSAFS_NTC_FIB_CONTEXT; 319 fib->type = FSAFS_NTC_FIB_CONTEXT;
@@ -344,7 +347,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
344 (fib_callback)aac_aif_callback, fibctx); 347 (fib_callback)aac_aif_callback, fibctx);
345 } else { 348 } else {
346 struct fib *fib = &dev->fibs[index]; 349 struct fib *fib = &dev->fibs[index];
347 struct hw_fib * hwfib = fib->hw_fib_va; 350 int start_callback = 0;
348 351
349 /* 352 /*
350 * Remove this fib from the Outstanding I/O queue. 353 * Remove this fib from the Outstanding I/O queue.
@@ -362,60 +365,104 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
362 return 0; 365 return 0;
363 } 366 }
364 367
365 if (isFastResponse) {
366 /*
367 * Doctor the fib
368 */
369 *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
370 hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
371 fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
372 }
373
374 FIB_COUNTER_INCREMENT(aac_config.FibRecved); 368 FIB_COUNTER_INCREMENT(aac_config.FibRecved);
375 369
376 if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) 370 if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
377 { 371
378 __le32 *pstatus = (__le32 *)hwfib->data; 372 if (isFastResponse)
379 if (*pstatus & cpu_to_le32(0xffff0000)) 373 fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
380 *pstatus = cpu_to_le32(ST_OK); 374
381 } 375 if (fib->callback) {
382 if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) 376 start_callback = 1;
383 { 377 } else {
384 if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) 378 unsigned long flagv;
385 FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved); 379 int complete = 0;
386 else 380
387 FIB_COUNTER_INCREMENT(aac_config.AsyncRecved); 381 dprintk((KERN_INFO "event_wait up\n"));
388 /* 382 spin_lock_irqsave(&fib->event_lock, flagv);
389 * NOTE: we cannot touch the fib after this 383 if (fib->done == 2) {
390 * call, because it may have been deallocated. 384 fib->done = 1;
391 */ 385 complete = 1;
392 if (likely(fib->callback && fib->callback_data)) { 386 } else {
393 fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; 387 fib->done = 1;
394 fib->callback(fib->callback_data, fib); 388 up(&fib->event_wait);
395 } else 389 }
396 dev_info(&dev->pdev->dev, 390 spin_unlock_irqrestore(&fib->event_lock, flagv);
397 "Invalid callback_fib[%d] (*%p)(%p)\n", 391
398 index, fib->callback, fib->callback_data); 392 spin_lock_irqsave(&dev->manage_lock, mflags);
393 dev->management_fib_count--;
394 spin_unlock_irqrestore(&dev->manage_lock,
395 mflags);
396
397 FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
398 if (complete)
399 aac_fib_complete(fib);
400 }
399 } else { 401 } else {
400 unsigned long flagv; 402 struct hw_fib *hwfib = fib->hw_fib_va;
401 dprintk((KERN_INFO "event_wait up\n")); 403
402 spin_lock_irqsave(&fib->event_lock, flagv); 404 if (isFastResponse) {
403 if (!fib->done) { 405 /* Doctor the fib */
404 fib->done = 1; 406 *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
405 up(&fib->event_wait); 407 hwfib->header.XferState |=
408 cpu_to_le32(AdapterProcessed);
409 fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
406 } 410 }
407 spin_unlock_irqrestore(&fib->event_lock, flagv);
408 411
409 spin_lock_irqsave(&dev->manage_lock, mflags); 412 if (hwfib->header.Command ==
410 dev->management_fib_count--; 413 cpu_to_le16(NuFileSystem)) {
411 spin_unlock_irqrestore(&dev->manage_lock, mflags); 414 __le32 *pstatus = (__le32 *)hwfib->data;
412 415
413 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 416 if (*pstatus & cpu_to_le32(0xffff0000))
414 if (fib->done == 2) { 417 *pstatus = cpu_to_le32(ST_OK);
418 }
419 if (hwfib->header.XferState &
420 cpu_to_le32(NoResponseExpected | Async)) {
421 if (hwfib->header.XferState & cpu_to_le32(
422 NoResponseExpected))
423 FIB_COUNTER_INCREMENT(
424 aac_config.NoResponseRecved);
425 else
426 FIB_COUNTER_INCREMENT(
427 aac_config.AsyncRecved);
428 start_callback = 1;
429 } else {
430 unsigned long flagv;
431 int complete = 0;
432
433 dprintk((KERN_INFO "event_wait up\n"));
415 spin_lock_irqsave(&fib->event_lock, flagv); 434 spin_lock_irqsave(&fib->event_lock, flagv);
416 fib->done = 0; 435 if (fib->done == 2) {
436 fib->done = 1;
437 complete = 1;
438 } else {
439 fib->done = 1;
440 up(&fib->event_wait);
441 }
417 spin_unlock_irqrestore(&fib->event_lock, flagv); 442 spin_unlock_irqrestore(&fib->event_lock, flagv);
443
444 spin_lock_irqsave(&dev->manage_lock, mflags);
445 dev->management_fib_count--;
446 spin_unlock_irqrestore(&dev->manage_lock,
447 mflags);
448
449 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
450 if (complete)
451 aac_fib_complete(fib);
452 }
453 }
454
455
456 if (start_callback) {
457 /*
458 * NOTE: we cannot touch the fib after this
459 * call, because it may have been deallocated.
460 */
461 if (likely(fib->callback && fib->callback_data)) {
462 fib->callback(fib->callback_data, fib);
463 } else {
418 aac_fib_complete(fib); 464 aac_fib_complete(fib);
465 aac_fib_free(fib);
419 } 466 }
420 467
421 } 468 }
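For fast-response completions the adapter never writes a status FIB back, so the reworked aac_intr_normal above "doctors" the fib in place: it stores ST_OK in the data area, ORs AdapterProcessed into XferState, and sets FIB_CONTEXT_FLAG_FASTRESP so later code knows the status was synthesized. A hedged sketch of that fix-up on a simplified fib; the constant values below are placeholders, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder values standing in for the driver's constants. */
    #define ST_OK               0
    #define ADAPTER_PROCESSED   0x00000400u
    #define FLAG_FASTRESP       0x08u

    struct mini_hw_fib {
        uint32_t xfer_state;
        uint32_t data[4];    /* first word holds the status */
    };

    struct mini_fib {
        struct mini_hw_fib *hw;
        unsigned int flags;
    };

    /* Synthesize the status the firmware skipped sending. */
    static void doctor_fast_response(struct mini_fib *fib)
    {
        fib->hw->data[0] = ST_OK;            /* pretend status arrived */
        fib->hw->xfer_state |= ADAPTER_PROCESSED;
        fib->flags |= FLAG_FASTRESP;         /* remember it was faked */
    }

    int main(void)
    {
        struct mini_hw_fib hw = { 0, { 0xdeadbeef } };
        struct mini_fib fib = { &hw, 0 };

        doctor_fast_response(&fib);
        printf("status=%u fastresp=%d\n", (unsigned)hw.data[0],
               !!(fib.flags & FLAG_FASTRESP));
        return 0;
    }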
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 3ecbf20ca29f..137d22d3a005 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -6,7 +6,8 @@
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc. 8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -57,7 +58,7 @@
57 58
58#include "aacraid.h" 59#include "aacraid.h"
59 60
60#define AAC_DRIVER_VERSION "1.2-1" 61#define AAC_DRIVER_VERSION "1.2.1"
61#ifndef AAC_DRIVER_BRANCH 62#ifndef AAC_DRIVER_BRANCH
62#define AAC_DRIVER_BRANCH "" 63#define AAC_DRIVER_BRANCH ""
63#endif 64#endif
@@ -401,61 +402,89 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
401static int aac_slave_configure(struct scsi_device *sdev) 402static int aac_slave_configure(struct scsi_device *sdev)
402{ 403{
403 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; 404 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
405 int chn, tid;
406 unsigned int depth = 0;
407 unsigned int set_timeout = 0;
408
409 chn = aac_logical_to_phys(sdev_channel(sdev));
410 tid = sdev_id(sdev);
411 if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
412 aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
413 depth = aac->hba_map[chn][tid].qd_limit;
414 set_timeout = 1;
415 goto common_config;
416 }
417
418
404 if (aac->jbod && (sdev->type == TYPE_DISK)) 419 if (aac->jbod && (sdev->type == TYPE_DISK))
405 sdev->removable = 1; 420 sdev->removable = 1;
406 if ((sdev->type == TYPE_DISK) && 421
407 (sdev_channel(sdev) != CONTAINER_CHANNEL) && 422 if (sdev->type == TYPE_DISK
408 (!aac->jbod || sdev->inq_periph_qual) && 423 && sdev_channel(sdev) != CONTAINER_CHANNEL
409 (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) { 424 && (!aac->jbod || sdev->inq_periph_qual)
425 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
426
410 if (expose_physicals == 0) 427 if (expose_physicals == 0)
411 return -ENXIO; 428 return -ENXIO;
429
412 if (expose_physicals < 0) 430 if (expose_physicals < 0)
413 sdev->no_uld_attach = 1; 431 sdev->no_uld_attach = 1;
414 } 432 }
415 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && 433
416 (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) && 434 if (sdev->tagged_supported
417 !sdev->no_uld_attach) { 435 && sdev->type == TYPE_DISK
436 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
437 && !sdev->no_uld_attach) {
438
418 struct scsi_device * dev; 439 struct scsi_device * dev;
419 struct Scsi_Host *host = sdev->host; 440 struct Scsi_Host *host = sdev->host;
420 unsigned num_lsu = 0; 441 unsigned num_lsu = 0;
421 unsigned num_one = 0; 442 unsigned num_one = 0;
422 unsigned depth;
423 unsigned cid; 443 unsigned cid;
424 444
425 /* 445 set_timeout = 1;
426 * Firmware has an individual device recovery time typically 446
427 * of 35 seconds, give us a margin.
428 */
429 if (sdev->request_queue->rq_timeout < (45 * HZ))
430 blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
431 for (cid = 0; cid < aac->maximum_num_containers; ++cid) 447 for (cid = 0; cid < aac->maximum_num_containers; ++cid)
432 if (aac->fsa_dev[cid].valid) 448 if (aac->fsa_dev[cid].valid)
433 ++num_lsu; 449 ++num_lsu;
450
434 __shost_for_each_device(dev, host) { 451 __shost_for_each_device(dev, host) {
435 if (dev->tagged_supported && (dev->type == TYPE_DISK) && 452 if (dev->tagged_supported
436 (!aac->raid_scsi_mode || 453 && dev->type == TYPE_DISK
437 (sdev_channel(sdev) != 2)) && 454 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
438 !dev->no_uld_attach) { 455 && !dev->no_uld_attach) {
439 if ((sdev_channel(dev) != CONTAINER_CHANNEL) 456 if ((sdev_channel(dev) != CONTAINER_CHANNEL)
440 || !aac->fsa_dev[sdev_id(dev)].valid) 457 || !aac->fsa_dev[sdev_id(dev)].valid) {
441 ++num_lsu; 458 ++num_lsu;
442 } else 459 }
460 } else {
443 ++num_one; 461 ++num_one;
462 }
444 } 463 }
464
445 if (num_lsu == 0) 465 if (num_lsu == 0)
446 ++num_lsu; 466 ++num_lsu;
447 depth = (host->can_queue - num_one) / num_lsu;
448 if (depth > 256)
449 depth = 256;
450 else if (depth < 2)
451 depth = 2;
452 scsi_change_queue_depth(sdev, depth);
453 } else {
454 scsi_change_queue_depth(sdev, 1);
455 467
456 sdev->tagged_supported = 1; 468 depth = (host->can_queue - num_one) / num_lsu;
457 } 469 }
458 470
471common_config:
472 /*
473 * Firmware has an individual device recovery time typically
474 * of 35 seconds, give us a margin.
475 */
476 if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
477 blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
478
479 if (depth > 256)
480 depth = 256;
481 else if (depth < 1)
482 depth = 1;
483
484 scsi_change_queue_depth(sdev, depth);
485
486 sdev->tagged_supported = 1;
487
459 return 0; 488 return 0;
460} 489}
461 490
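The restructured aac_slave_configure funnels every device through one clamp at common_config: native RAW devices take the firmware's qd_limit, array members share host->can_queue minus the num_one single-queue commands across num_lsu logical units, and the result is bounded to 1..256 before scsi_change_queue_depth(). A sketch of the shared-depth arithmetic:

    #include <stdio.h>

    /*
     * Split the host queue across logical units: num_one commands stay
     * pinned to single-queue devices, the rest are divided evenly among
     * num_lsu logical units, bounded to the 1..256 window the driver uses.
     */
    static int shared_depth(int can_queue, int num_one, int num_lsu)
    {
        int depth;

        if (num_lsu == 0)
            num_lsu = 1;    /* same guard as the driver */
        depth = (can_queue - num_one) / num_lsu;
        if (depth > 256)
            depth = 256;
        else if (depth < 1)
            depth = 1;
        return depth;
    }

    int main(void)
    {
        printf("%d\n", shared_depth(504, 4, 2));    /* 250 */
        printf("%d\n", shared_depth(504, 0, 1));    /* 256, clamped */
        return 0;
    }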
@@ -470,6 +499,15 @@ static int aac_slave_configure(struct scsi_device *sdev)
470 499
471static int aac_change_queue_depth(struct scsi_device *sdev, int depth) 500static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
472{ 501{
502 struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
503 int chn, tid, is_native_device = 0;
504
505 chn = aac_logical_to_phys(sdev_channel(sdev));
506 tid = sdev_id(sdev);
507 if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
508 aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW)
509 is_native_device = 1;
510
473 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && 511 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
474 (sdev_channel(sdev) == CONTAINER_CHANNEL)) { 512 (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
475 struct scsi_device * dev; 513 struct scsi_device * dev;
@@ -491,9 +529,12 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
491 else if (depth < 2) 529 else if (depth < 2)
492 depth = 2; 530 depth = 2;
493 return scsi_change_queue_depth(sdev, depth); 531 return scsi_change_queue_depth(sdev, depth);
532 } else if (is_native_device) {
533 scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit);
534 } else {
535 scsi_change_queue_depth(sdev, 1);
494 } 536 }
495 537 return sdev->queue_depth;
496 return scsi_change_queue_depth(sdev, 1);
497} 538}
498 539
499static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) 540static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
@@ -516,8 +557,39 @@ static struct device_attribute aac_raid_level_attr = {
516 .show = aac_show_raid_level 557 .show = aac_show_raid_level
517}; 558};
518 559
560static ssize_t aac_show_unique_id(struct device *dev,
561 struct device_attribute *attr, char *buf)
562{
563 struct scsi_device *sdev = to_scsi_device(dev);
564 struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
565 unsigned char sn[16];
566
567 memset(sn, 0, sizeof(sn));
568
569 if (sdev_channel(sdev) == CONTAINER_CHANNEL)
570 memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn));
571
572 return snprintf(buf, 16 * 2 + 2,
573 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
574 sn[0], sn[1], sn[2], sn[3],
575 sn[4], sn[5], sn[6], sn[7],
576 sn[8], sn[9], sn[10], sn[11],
577 sn[12], sn[13], sn[14], sn[15]);
578}
579
580static struct device_attribute aac_unique_id_attr = {
581 .attr = {
582 .name = "unique_id",
583 .mode = 0444,
584 },
585 .show = aac_show_unique_id
586};
587
588
589
519static struct device_attribute *aac_dev_attrs[] = { 590static struct device_attribute *aac_dev_attrs[] = {
520 &aac_raid_level_attr, 591 &aac_raid_level_attr,
592 &aac_unique_id_attr,
521 NULL, 593 NULL,
522}; 594};
523 595
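The new unique_id attribute formats a 16-byte container identifier as 32 uppercase hex digits plus a newline, which is why the buffer is sized 16 * 2 + 2 (the extra two bytes cover the newline and the NUL terminator). A userspace sketch of the same formatting; the sample identifier is hypothetical:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char sn[16];
        char buf[16 * 2 + 2];    /* 32 hex digits + '\n' + NUL */
        int n = 0;

        memset(sn, 0, sizeof(sn));
        memcpy(sn, "EXAMPLE-ID", 10);    /* hypothetical identifier */

        for (size_t i = 0; i < sizeof(sn); i++)
            n += snprintf(buf + n, sizeof(buf) - n, "%02X", sn[i]);
        snprintf(buf + n, sizeof(buf) - n, "\n");

        fputs(buf, stdout);
        return 0;
    }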
@@ -534,46 +606,136 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
534 struct scsi_device * dev = cmd->device; 606 struct scsi_device * dev = cmd->device;
535 struct Scsi_Host * host = dev->host; 607 struct Scsi_Host * host = dev->host;
536 struct aac_dev * aac = (struct aac_dev *)host->hostdata; 608 struct aac_dev * aac = (struct aac_dev *)host->hostdata;
537 int count; 609 int count, found;
610 u32 bus, cid;
538 int ret = FAILED; 611 int ret = FAILED;
539 612
540 printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%llu)\n", 613 bus = aac_logical_to_phys(scmd_channel(cmd));
541 AAC_DRIVERNAME, 614 cid = scmd_id(cmd);
542 host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun); 615 if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
543 switch (cmd->cmnd[0]) { 616 struct fib *fib;
544 case SERVICE_ACTION_IN_16: 617 struct aac_hba_tm_req *tmf;
545 if (!(aac->raw_io_interface) || 618 int status;
546 !(aac->raw_io_64) || 619 u64 address;
547 ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) 620 __le32 managed_request_id;
548 break; 621
549 case INQUIRY: 622 pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
550 case READ_CAPACITY: 623 AAC_DRIVERNAME,
551 /* Mark associated FIB to not complete, eh handler does this */ 624 host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun);
625
626 found = 0;
552 627 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
553 struct fib * fib = &aac->fibs[count]; 628 fib = &aac->fibs[count];
554 if (fib->hw_fib_va->header.XferState && 629 if (*(u8 *)fib->hw_fib_va != 0 &&
555 (fib->flags & FIB_CONTEXT_FLAG) && 630 (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
556 (fib->callback_data == cmd)) { 631 (fib->callback_data == cmd)) {
557 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; 632 found = 1;
558 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; 633 managed_request_id = ((struct aac_hba_cmd_req *)
634 fib->hw_fib_va)->request_id;
635 break;
636 }
637 }
638 if (!found)
639 return ret;
640
641 /* start a HBA_TMF_ABORT_TASK TMF request */
642 fib = aac_fib_alloc(aac);
643 if (!fib)
644 return ret;
645
646 tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
647 memset(tmf, 0, sizeof(*tmf));
648 tmf->tmf = HBA_TMF_ABORT_TASK;
649 tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
650 tmf->lun[1] = cmd->device->lun;
651
652 address = (u64)fib->hw_error_pa;
653 tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
654 tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
655 tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
656
657 fib->hbacmd_size = sizeof(*tmf);
658 cmd->SCp.sent_command = 0;
659
660 status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
661 (fib_callback) aac_hba_callback,
662 (void *) cmd);
663
664 /* Wait up to 2 minutes for completion */
665 for (count = 0; count < 120; ++count) {
666 if (cmd->SCp.sent_command) {
559 667 ret = SUCCESS;
668 break;
560 669 }
670 msleep(1000);
561 671 }
562 break; 672
563 case TEST_UNIT_READY: 673 if (ret != SUCCESS)
564 /* Mark associated FIB to not complete, eh handler does this */ 674 pr_err("%s: Host adapter abort request timed out\n",
565 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { 675 AAC_DRIVERNAME);
566 struct scsi_cmnd * command; 676 } else {
567 struct fib * fib = &aac->fibs[count]; 677 pr_err(
568 if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) && 678 "%s: Host adapter abort request.\n"
569 (fib->flags & FIB_CONTEXT_FLAG) && 679 "%s: Outstanding commands on (%d,%d,%d,%d):\n",
570 ((command = fib->callback_data)) && 680 AAC_DRIVERNAME, AAC_DRIVERNAME,
571 (command->device == cmd->device)) { 681 host->host_no, sdev_channel(dev), sdev_id(dev),
572 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; 682 (int)dev->lun);
573 command->SCp.phase = AAC_OWNER_ERROR_HANDLER; 683 switch (cmd->cmnd[0]) {
574 if (command == cmd) 684 case SERVICE_ACTION_IN_16:
685 if (!(aac->raw_io_interface) ||
686 !(aac->raw_io_64) ||
687 ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
688 break;
689 case INQUIRY:
690 case READ_CAPACITY:
691 /*
692 * Mark associated FIB to not complete,
693 * eh handler does this
694 */
695 for (count = 0;
696 count < (host->can_queue + AAC_NUM_MGT_FIB);
697 ++count) {
698 struct fib *fib = &aac->fibs[count];
699
700 if (fib->hw_fib_va->header.XferState &&
701 (fib->flags & FIB_CONTEXT_FLAG) &&
702 (fib->callback_data == cmd)) {
703 fib->flags |=
704 FIB_CONTEXT_FLAG_TIMED_OUT;
705 cmd->SCp.phase =
706 AAC_OWNER_ERROR_HANDLER;
575 707 ret = SUCCESS;
708 }
709 }
710 break;
711 case TEST_UNIT_READY:
712 /*
713 * Mark associated FIB to not complete,
714 * eh handler does this
715 */
716 for (count = 0;
717 count < (host->can_queue + AAC_NUM_MGT_FIB);
718 ++count) {
719 struct scsi_cmnd *command;
720 struct fib *fib = &aac->fibs[count];
721
722 command = fib->callback_data;
723
724 if ((fib->hw_fib_va->header.XferState &
725 cpu_to_le32
726 (Async | NoResponseExpected)) &&
727 (fib->flags & FIB_CONTEXT_FLAG) &&
728 ((command)) &&
729 (command->device == cmd->device)) {
730 fib->flags |=
731 FIB_CONTEXT_FLAG_TIMED_OUT;
732 command->SCp.phase =
733 AAC_OWNER_ERROR_HANDLER;
734 if (command == cmd)
735 ret = SUCCESS;
736 }
576 737 }
738 break;
577 739 }
578 740 }
579 741 return ret;
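Both the abort path above and the reset path below share the same completion handshake: the task-management request is fired with aac_hba_callback() as the FIB callback, and the EH thread then polls a flag that the callback sets. A minimal sketch of that handshake under those assumptions (hypothetical helper name, error handling trimmed):

	/* Sketch: fire-and-poll handshake used by the new EH paths.
	 * aac_hba_callback() sets cmd->SCp.sent_command on completion;
	 * the EH thread runs in process context, so msleep() is fine. */
	static int sketch_send_tmf_and_wait(struct aac_dev *aac, struct fib *fib,
					    struct scsi_cmnd *cmd, u8 iu_type)
	{
		int i;

		cmd->SCp.sent_command = 0;
		if (aac_hba_send(iu_type, fib,
				 (fib_callback)aac_hba_callback, (void *)cmd))
			return FAILED;

		for (i = 0; i < 120; i++) {	/* up to 2 minutes */
			if (cmd->SCp.sent_command)
				return SUCCESS;
			msleep(1000);
		}
		return FAILED;
	}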
@@ -588,70 +750,165 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
588 750 {
589 751 struct scsi_device * dev = cmd->device;
590 752 struct Scsi_Host * host = dev->host;
591 struct scsi_cmnd * command;
592 int count;
593 753 struct aac_dev * aac = (struct aac_dev *)host->hostdata;
594 unsigned long flags; 754 int count;
595 755 u32 bus, cid;
596 /* Mark the associated FIB to not complete, eh handler does this */ 756 int ret = FAILED;
597 for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { 757
598 struct fib * fib = &aac->fibs[count]; 758 bus = aac_logical_to_phys(scmd_channel(cmd));
599 if (fib->hw_fib_va->header.XferState && 759 cid = scmd_id(cmd);
600 (fib->flags & FIB_CONTEXT_FLAG) && 760 if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
601 (fib->callback_data == cmd)) { 761 aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
602 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; 762 struct fib *fib;
603 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; 763 int status;
764 u64 address;
765 u8 command;
766
767 pr_err("%s: Host adapter reset request. SCSI hang ?\n",
768 AAC_DRIVERNAME);
769
770 fib = aac_fib_alloc(aac);
771 if (!fib)
772 return ret;
773
774
775 if (aac->hba_map[bus][cid].reset_state == 0) {
776 struct aac_hba_tm_req *tmf;
777
778 /* start a HBA_TMF_LUN_RESET TMF request */
779 tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
780 memset(tmf, 0, sizeof(*tmf));
781 tmf->tmf = HBA_TMF_LUN_RESET;
782 tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
783 tmf->lun[1] = cmd->device->lun;
784
785 address = (u64)fib->hw_error_pa;
786 tmf->error_ptr_hi = cpu_to_le32
787 ((u32)(address >> 32));
788 tmf->error_ptr_lo = cpu_to_le32
789 ((u32)(address & 0xffffffff));
790 tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
791 fib->hbacmd_size = sizeof(*tmf);
792
793 command = HBA_IU_TYPE_SCSI_TM_REQ;
794 aac->hba_map[bus][cid].reset_state++;
795 } else if (aac->hba_map[bus][cid].reset_state >= 1) {
796 struct aac_hba_reset_req *rst;
797
798 /* already tried, start a hard reset now */
799 rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
800 memset(rst, 0, sizeof(*rst));
801 /* reset_type is already zero... */
802 rst->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
803
804 address = (u64)fib->hw_error_pa;
805 rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
806 rst->error_ptr_lo = cpu_to_le32
807 ((u32)(address & 0xffffffff));
808 rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
809 fib->hbacmd_size = sizeof(*rst);
810
811 command = HBA_IU_TYPE_SATA_REQ;
812 aac->hba_map[bus][cid].reset_state = 0;
604 } 813 }
605 } 814 cmd->SCp.sent_command = 0;
606 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
607 AAC_DRIVERNAME);
608 815
609 if ((count = aac_check_health(aac))) 816 status = aac_hba_send(command, fib,
610 return count; 817 (fib_callback) aac_hba_callback,
611 /* 818 (void *) cmd);
612 * Wait for all commands to complete to this specific
613 * target (block maximum 60 seconds).
614 */
615 for (count = 60; count; --count) {
616 int active = aac->in_reset;
617 819
618 if (active == 0) 820 /* Wait up to 2 minutes for completion */
619 __shost_for_each_device(dev, host) { 821 for (count = 0; count < 120; ++count) {
620 spin_lock_irqsave(&dev->list_lock, flags); 822 if (cmd->SCp.sent_command) {
621 list_for_each_entry(command, &dev->cmd_list, list) { 823 ret = SUCCESS;
622 if ((command != cmd) &&
623 (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
624 active++;
625 break;
626 }
627 }
628 spin_unlock_irqrestore(&dev->list_lock, flags);
629 if (active)
630 break; 824 break;
825 }
826 msleep(1000);
827 }
631 828
829 if (ret != SUCCESS)
830 pr_err("%s: Host adapter reset request timed out\n",
831 AAC_DRIVERNAME);
832 } else {
833 struct scsi_cmnd *command;
834 unsigned long flags;
835
836 /* Mark the assoc. FIB to not complete, eh handler does this */
837 for (count = 0;
838 count < (host->can_queue + AAC_NUM_MGT_FIB);
839 ++count) {
840 struct fib *fib = &aac->fibs[count];
841
842 if (fib->hw_fib_va->header.XferState &&
843 (fib->flags & FIB_CONTEXT_FLAG) &&
844 (fib->callback_data == cmd)) {
845 fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
846 cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
847 }
632 } 848 }
849
850 pr_err("%s: Host adapter reset request. SCSI hang ?\n",
851 AAC_DRIVERNAME);
852
853 count = aac_check_health(aac);
854 if (count)
855 return count;
633 /* 856 /*
634 * We can exit If all the commands are complete 857 * Wait for all commands to complete to this specific
858 * target (block maximum 60 seconds).
635 */ 859 */
636 if (active == 0) 860 for (count = 60; count; --count) {
637 return SUCCESS; 861 int active = aac->in_reset;
638 ssleep(1); 862
863 if (active == 0)
864 __shost_for_each_device(dev, host) {
865 spin_lock_irqsave(&dev->list_lock, flags);
866 list_for_each_entry(command, &dev->cmd_list,
867 list) {
868 if ((command != cmd) &&
869 (command->SCp.phase ==
870 AAC_OWNER_FIRMWARE)) {
871 active++;
872 break;
873 }
874 }
875 spin_unlock_irqrestore(&dev->list_lock, flags);
876 if (active)
877 break;
878
879 }
880 /*
881 * We can exit if all the commands are complete
882 */
883 if (active == 0)
884 return SUCCESS;
885 ssleep(1);
886 }
887 pr_err("%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
888
889 /*
890 * This adapter needs a blind reset, only do so for
891 * Adapters that support a register, instead of a commanded,
892 * reset.
893 */
894 if (((aac->supplement_adapter_info.SupportedOptions2 &
895 AAC_OPTION_MU_RESET) ||
896 (aac->supplement_adapter_info.SupportedOptions2 &
897 AAC_OPTION_DOORBELL_RESET)) &&
898 aac_check_reset &&
899 ((aac_check_reset != 1) ||
900 !(aac->supplement_adapter_info.SupportedOptions2 &
901 AAC_OPTION_IGNORE_RESET))) {
902 /* Bypass wait for command quiesce */
903 aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
904 }
905 ret = SUCCESS;
639 } 906 }
640 printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
641 /* 907 /*
642 * This adapter needs a blind reset, only do so for Adapters that 908 * Cause an immediate retry of the command with a ten second delay
643 * support a register, instead of a commanded, reset. 909 * after successful tur
644 */ 910 */
645 if (((aac->supplement_adapter_info.SupportedOptions2 & 911 return ret;
646 AAC_OPTION_MU_RESET) ||
647 (aac->supplement_adapter_info.SupportedOptions2 &
648 AAC_OPTION_DOORBELL_RESET)) &&
649 aac_check_reset &&
650 ((aac_check_reset != 1) ||
651 !(aac->supplement_adapter_info.SupportedOptions2 &
652 AAC_OPTION_IGNORE_RESET)))
653 aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
654 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
655} 912}
656 913
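For native-RAW devices, aac_eh_reset() escalates through the per-target reset_state counter: the first attempt issues a HBA_TMF_LUN_RESET task-management IU, and a re-entry for the same target falls through to a controller hard-reset IU and rewinds the counter. The shape of the escalation, with the build_* helpers standing in for the memset-and-fill sequences shown above (placeholder names, not driver functions):

	if (aac->hba_map[bus][cid].reset_state == 0) {
		build_lun_reset_tmf(fib, cmd);	/* HBA_IU_TYPE_SCSI_TM_REQ */
		aac->hba_map[bus][cid].reset_state++;
	} else {
		build_hard_reset(fib);		/* HBA_IU_TYPE_SATA_REQ */
		aac->hba_map[bus][cid].reset_state = 0;
	}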
657 914 /**
@@ -911,10 +1168,16 @@ static ssize_t aac_store_reset_adapter(struct device *device,
911 1168 const char *buf, size_t count)
912 1169 {
913 1170 int retval = -EACCES;
1171 int bled = 0;
1172 struct aac_dev *aac;
1173
914 1174
915 1175 if (!capable(CAP_SYS_ADMIN))
916 1176 return retval;
917 retval = aac_reset_adapter((struct aac_dev*)class_to_shost(device)->hostdata, buf[0] == '!'); 1177
1178 aac = (struct aac_dev *)class_to_shost(device)->hostdata;
1179 bled = buf[0] == '!' ? 1:0;
1180 retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
918 1181 if (retval >= 0)
919 1182 retval = count;
920 1183 return retval;
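Userspace behaviour of this hook is unchanged by the plumbing: any root write triggers a reset, and a leading '!' passes bled = 1 for the forced path; the reset type from sysfs is now always IOP_HWSOFT_RESET. Illustrative usage (attribute path and host number are assumptions, not shown in this hunk):

	# echo reset > /sys/class/scsi_host/host0/reset_adapter	(bled = 0)
	# echo '!' > /sys/class/scsi_host/host0/reset_adapter	(bled = 1)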
@@ -1070,6 +1333,7 @@ static void __aac_shutdown(struct aac_dev * aac)
1070 1333 {
1071 1334 int i;
1072 1335
1336 aac->adapter_shutdown = 1;
1073 1337 aac_send_shutdown(aac);
1074 1338
1075 1339 if (aac->aif_thread) {
@@ -1285,7 +1549,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1285 1549 else
1286 1550 shost->this_id = shost->max_id;
1287 1551
1288 if (aac_drivers[index].quirks & AAC_QUIRK_SRC) 1552 if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC)
1289 1553 aac_intr_normal(aac, 0, 2, 0, NULL);
1290 1554
1291 1555 /*
@@ -1327,35 +1591,12 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1327 1591
1328 1592 static void aac_release_resources(struct aac_dev *aac)
1329 1593 {
1330 int i;
1331
1332 aac_adapter_disable_int(aac); 1594 aac_adapter_disable_int(aac);
1333 if (aac->pdev->device == PMC_DEVICE_S6 || 1595 aac_free_irq(aac);
1334 aac->pdev->device == PMC_DEVICE_S7 ||
1335 aac->pdev->device == PMC_DEVICE_S8 ||
1336 aac->pdev->device == PMC_DEVICE_S9) {
1337 if (aac->max_msix > 1) {
1338 for (i = 0; i < aac->max_msix; i++)
1339 free_irq(pci_irq_vector(aac->pdev, i),
1340 &(aac->aac_msix[i]));
1341 } else {
1342 free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
1343 }
1344 } else {
1345 free_irq(aac->pdev->irq, aac);
1346 }
1347 if (aac->msi)
1348 pci_disable_msi(aac->pdev);
1349 else if (aac->max_msix > 1)
1350 pci_disable_msix(aac->pdev);
1351
1352} 1596}
1353 1597
1354static int aac_acquire_resources(struct aac_dev *dev) 1598static int aac_acquire_resources(struct aac_dev *dev)
1355{ 1599{
1356 int i, j;
1357 int instance = dev->id;
1358 const char *name = dev->name;
1359 1600 unsigned long status;
1360 1601 /*
1361 1602 * First clear out all interrupts. Then enable the ones that we
@@ -1377,37 +1618,8 @@ static int aac_acquire_resources(struct aac_dev *dev)
1377 1618 if (dev->msi_enabled)
1378 1619 aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
1379 1620
1380 if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) { 1621 if (aac_acquire_irq(dev))
1381 for (i = 0; i < dev->max_msix; i++) { 1622 goto error_iounmap;
1382 dev->aac_msix[i].vector_no = i;
1383 dev->aac_msix[i].dev = dev;
1384
1385 if (request_irq(pci_irq_vector(dev->pdev, i),
1386 dev->a_ops.adapter_intr,
1387 0, "aacraid", &(dev->aac_msix[i]))) {
1388 printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
1389 name, instance, i);
1390 for (j = 0 ; j < i ; j++)
1391 free_irq(pci_irq_vector(dev->pdev, j),
1392 &(dev->aac_msix[j]));
1393 pci_disable_msix(dev->pdev);
1394 goto error_iounmap;
1395 }
1396 }
1397 } else {
1398 dev->aac_msix[0].vector_no = 0;
1399 dev->aac_msix[0].dev = dev;
1400
1401 if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
1402 IRQF_SHARED, "aacraid",
1403 &(dev->aac_msix[0])) < 0) {
1404 if (dev->msi)
1405 pci_disable_msi(dev->pdev);
1406 printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
1407 name, instance);
1408 goto error_iounmap;
1409 }
1410 }
1411 1623
1412 1624 aac_adapter_enable_int(dev);
1413 1625
@@ -1420,7 +1632,7 @@ static int aac_acquire_resources(struct aac_dev *dev)
1420 1632 /* After EEH recovery or suspend resume, max_msix count
1421 1633 * may change, therefore updating in init as well.
1422 1634 */
1423 dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); 1635 dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix);
1424 1636 aac_adapter_start(dev);
1425 1637 }
1426 1638 return 0;
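The open-coded MSI-X loops in aac_release_resources()/aac_acquire_resources() collapse into the shared aac_free_irq()/aac_acquire_irq() helpers already used elsewhere in the driver. For orientation, a sketch of what such an acquire helper typically looks like after the pci_alloc_irq_vectors() conversion in this series (this is an assumption about the helper's shape, not its actual body; the legacy single-vector case would additionally need IRQF_SHARED):

	/* Sketch: consolidated per-vector IRQ setup with unwind on failure. */
	static int sketch_acquire_irq(struct aac_dev *dev)
	{
		int i, err;

		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;
			err = request_irq(pci_irq_vector(dev->pdev, i),
					  dev->a_ops.adapter_intr, 0,
					  "aacraid", &dev->aac_msix[i]);
			if (err) {
				while (--i >= 0)	/* unwind earlier vectors */
					free_irq(pci_irq_vector(dev->pdev, i),
						 &dev->aac_msix[i]);
				return err;
			}
		}
		return 0;
	}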
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index 6c53b1d8b2ba..c59074e782d6 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -5,7 +5,8 @@
5 * Adaptec aacraid device driver for Linux. 5 * Adaptec aacraid device driver for Linux.
6 * 6 *
7 * Copyright (c) 2000-2010 Adaptec, Inc. 7 * Copyright (c) 2000-2010 Adaptec, Inc.
8 * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 8 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
9 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 7d8013feedde..a1bc5bbf7a34 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -6,7 +6,8 @@
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc. 8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -60,7 +61,7 @@ static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
60 61 * case warrants this half baked, but convenient, check here.
61 62 */
62 63 if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
63 dev->init->MaxIoCommands = 64 dev->init->r7.max_io_commands =
64 65 cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
65 66 dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
66 67 }
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index ac1638069335..0e69a80c3275 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -6,7 +6,8 @@
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc. 8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -315,10 +316,10 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
315 316
316static void aac_rx_start_adapter(struct aac_dev *dev) 317static void aac_rx_start_adapter(struct aac_dev *dev)
317{ 318{
318 struct aac_init *init; 319 union aac_init *init;
319 320
320 init = dev->init; 321 init = dev->init;
321 init->HostElapsedSeconds = cpu_to_le32(get_seconds()); 322 init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
322 // We can only use a 32 bit address here 323 // We can only use a 32 bit address here
323 rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, 324 rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
324 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); 325 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
@@ -470,7 +471,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
470 return 0; 471 return 0;
471} 472}
472 473
473static int aac_rx_restart_adapter(struct aac_dev *dev, int bled) 474static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
474{ 475{
475 u32 var = 0; 476 u32 var = 0;
476 477
@@ -559,7 +560,7 @@ int _aac_rx_init(struct aac_dev *dev)
559 dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt; 560 dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
560 dev->OIMR = status = rx_readb (dev, MUnit.OIMR); 561 dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
561 if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) && 562 if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
562 !aac_rx_restart_adapter(dev, 0)) 563 !aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
563 /* Make sure the Hardware FIFO is empty */ 564 /* Make sure the Hardware FIFO is empty */
564 while ((++restart < 512) && 565 while ((++restart < 512) &&
565 (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL)); 566 (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
@@ -568,7 +569,8 @@ int _aac_rx_init(struct aac_dev *dev)
568 */ 569 */
569 status = rx_readl(dev, MUnit.OMRx[0]); 570 status = rx_readl(dev, MUnit.OMRx[0]);
570 if (status & KERNEL_PANIC) { 571 if (status & KERNEL_PANIC) {
571 if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev))) 572 if (aac_rx_restart_adapter(dev,
573 aac_rx_check_health(dev), IOP_HWSOFT_RESET))
572 goto error_iounmap; 574 goto error_iounmap;
573 ++restart; 575 ++restart;
574 } 576 }
@@ -606,7 +608,8 @@ int _aac_rx_init(struct aac_dev *dev)
606 ((startup_timeout > 60) 608 ((startup_timeout > 60)
607 ? (startup_timeout - 60) 609 ? (startup_timeout - 60)
608 : (startup_timeout / 2))))) { 610 : (startup_timeout / 2))))) {
609 if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))) 611 if (likely(!aac_rx_restart_adapter(dev,
612 aac_rx_check_health(dev), IOP_HWSOFT_RESET)))
610 start = jiffies; 613 start = jiffies;
611 ++restart; 614 ++restart;
612 } 615 }
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 869aea23c041..553922fed524 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -6,7 +6,8 @@
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc. 8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -245,19 +246,19 @@ static void aac_sa_interrupt_adapter (struct aac_dev *dev)
245 246
246static void aac_sa_start_adapter(struct aac_dev *dev) 247static void aac_sa_start_adapter(struct aac_dev *dev)
247{ 248{
248 struct aac_init *init; 249 union aac_init *init;
249 /* 250 /*
250 * Fill in the remaining pieces of the init. 251 * Fill in the remaining pieces of the init.
251 */ 252 */
252 init = dev->init; 253 init = dev->init;
253 init->HostElapsedSeconds = cpu_to_le32(get_seconds()); 254 init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
254 /* We can only use a 32 bit address here */ 255 /* We can only use a 32 bit address here */
255 sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, 256 sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
256 (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, 257 (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
257 NULL, NULL, NULL, NULL, NULL); 258 NULL, NULL, NULL, NULL, NULL);
258} 259}
259 260
260static int aac_sa_restart_adapter(struct aac_dev *dev, int bled) 261static int aac_sa_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
261{ 262{
262 return -EINVAL; 263 return -EINVAL;
263} 264}
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 0c453880f214..8e4e2ddbafd7 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -6,7 +6,8 @@
6 * Adaptec aacraid device driver for Linux. 6 * Adaptec aacraid device driver for Linux.
7 * 7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc. 8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) 9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -135,8 +136,16 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
135 136
136 if (mode & AAC_INT_MODE_AIF) { 137 if (mode & AAC_INT_MODE_AIF) {
137 /* handle AIF */ 138 /* handle AIF */
138 if (dev->aif_thread && dev->fsa_dev) 139 if (dev->sa_firmware) {
139 aac_intr_normal(dev, 0, 2, 0, NULL); 140 u32 events = src_readl(dev, MUnit.SCR0);
141
142 aac_intr_normal(dev, events, 1, 0, NULL);
143 writel(events, &dev->IndexRegs->Mailbox[0]);
144 src_writel(dev, MUnit.IDR, 1 << 23);
145 } else {
146 if (dev->aif_thread && dev->fsa_dev)
147 aac_intr_normal(dev, 0, 2, 0, NULL);
148 }
140 if (dev->msi_enabled) 149 if (dev->msi_enabled)
141 aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT); 150 aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
142 mode = 0; 151 mode = 0;
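On sa_firmware controllers the AIF path no longer just kicks the AIF thread: the interrupt handler fetches the event word itself and acknowledges it back to the firmware. The same four steps as the hunk above, annotated (reading IDR bit 23 as "AIF consumed" is an inference from this change, not documented here):

	u32 events = src_readl(dev, MUnit.SCR0);	/* fetch event word */
	aac_intr_normal(dev, events, 1, 0, NULL);	/* isAif = 1: handle inline */
	writel(events, &dev->IndexRegs->Mailbox[0]);	/* echo events back to fw */
	src_writel(dev, MUnit.IDR, 1 << 23);		/* ring doorbell bit 23 */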
@@ -148,17 +157,19 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
148 for (;;) { 157 for (;;) {
149 isFastResponse = 0; 158 isFastResponse = 0;
150 /* remove toggle bit (31) */ 159 /* remove toggle bit (31) */
151 handle = (dev->host_rrq[index] & 0x7fffffff); 160 handle = le32_to_cpu((dev->host_rrq[index])
152 /* check fast response bit (30) */ 161 & 0x7fffffff);
162 /* check fast response bits (30, 1) */
153 if (handle & 0x40000000) 163 if (handle & 0x40000000)
154 isFastResponse = 1; 164 isFastResponse = 1;
155 handle &= 0x0000ffff; 165 handle &= 0x0000ffff;
156 if (handle == 0) 166 if (handle == 0)
157 break; 167 break;
168 handle >>= 2;
158 if (dev->msi_enabled && dev->max_msix > 1) 169 if (dev->msi_enabled && dev->max_msix > 1)
159 atomic_dec(&dev->rrq_outstanding[vector_no]); 170 atomic_dec(&dev->rrq_outstanding[vector_no]);
171 aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
160 dev->host_rrq[index++] = 0; 172 dev->host_rrq[index++] = 0;
161 aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
162 if (index == (vector_no + 1) * dev->vector_cap) 173 if (index == (vector_no + 1) * dev->vector_cap)
163 index = vector_no * dev->vector_cap; 174 index = vector_no * dev->vector_cap;
164 dev->host_rrq_idx[vector_no] = index; 175 dev->host_rrq_idx[vector_no] = index;
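Each host RRQ entry is now read as little-endian and treated as a packed field: bit 31 is the producer toggle, bit 30 flags a fast response, and the low 16 bits hold the handle, whose bottom two bits are dropped (hence handle >>= 2 replacing the old handle - 1). A self-contained sketch of that decode, assuming this layout:

	/* Sketch: decode one response-queue entry. Returns 0 when the
	 * queue is drained, 1 with *handle and *fast filled otherwise. */
	static inline int sketch_decode_rrq(__le32 raw, u32 *handle, int *fast)
	{
		u32 entry = le32_to_cpu(raw) & 0x7fffffff; /* strip toggle bit 31 */

		*fast = !!(entry & 0x40000000);		/* fast response, bit 30 */
		*handle = entry & 0x0000ffff;		/* 16-bit handle field */
		if (*handle == 0)
			return 0;
		*handle >>= 2;				/* index into FIB table */
		return 1;
	}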
@@ -384,7 +395,7 @@ static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
384 395
385static void aac_src_start_adapter(struct aac_dev *dev) 396static void aac_src_start_adapter(struct aac_dev *dev)
386{ 397{
387 struct aac_init *init; 398 union aac_init *init;
388 int i; 399 int i;
389 400
390 /* reset host_rrq_idx first */ 401 /* reset host_rrq_idx first */
@@ -392,14 +403,26 @@ static void aac_src_start_adapter(struct aac_dev *dev)
392 dev->host_rrq_idx[i] = i * dev->vector_cap; 403 dev->host_rrq_idx[i] = i * dev->vector_cap;
393 atomic_set(&dev->rrq_outstanding[i], 0); 404 atomic_set(&dev->rrq_outstanding[i], 0);
394 } 405 }
406 atomic_set(&dev->msix_counter, 0);
395 dev->fibs_pushed_no = 0; 407 dev->fibs_pushed_no = 0;
396 408
397 init = dev->init; 409 init = dev->init;
398 init->HostElapsedSeconds = cpu_to_le32(get_seconds()); 410 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
411 init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds());
412 src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
413 lower_32_bits(dev->init_pa),
414 upper_32_bits(dev->init_pa),
415 sizeof(struct _r8) +
416 (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
417 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
418 } else {
419 init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
420 // We can only use a 32 bit address here
421 src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
422 (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
423 NULL, NULL, NULL, NULL, NULL);
424 }
399 425
400 /* We can only use a 32 bit address here */
401 src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
402 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
403} 426}
404 427
405/** 428/**
@@ -435,6 +458,11 @@ static int aac_src_check_health(struct aac_dev *dev)
435 return 0; 458 return 0;
436} 459}
437 460
461static inline u32 aac_get_vector(struct aac_dev *dev)
462{
463 return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
464}
465
438/** 466/**
439 * aac_src_deliver_message 467 * aac_src_deliver_message
440 * @fib: fib to issue 468 * @fib: fib to issue
@@ -448,66 +476,125 @@ static int aac_src_deliver_message(struct fib *fib)
448 u32 fibsize; 476 u32 fibsize;
449 dma_addr_t address; 477 dma_addr_t address;
450 struct aac_fib_xporthdr *pFibX; 478 struct aac_fib_xporthdr *pFibX;
479 int native_hba;
451#if !defined(writeq) 480#if !defined(writeq)
452 unsigned long flags; 481 unsigned long flags;
453#endif 482#endif
454 483
455 u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
456 u16 vector_no; 484 u16 vector_no;
457 485
458 atomic_inc(&q->numpending); 486 atomic_inc(&q->numpending);
459 487
460 if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest && 488 native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;
461 dev->max_msix > 1) { 489
462 vector_no = fib->vector_no; 490
463 fib->hw_fib_va->header.Handle += (vector_no << 16); 491 if (dev->msi_enabled && dev->max_msix > 1 &&
492 (native_hba || fib->hw_fib_va->header.Command != AifRequest)) {
493
494 if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
495 && dev->sa_firmware)
496 vector_no = aac_get_vector(dev);
497 else
498 vector_no = fib->vector_no;
499
500 if (native_hba) {
501 if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
502 struct aac_hba_tm_req *tm_req;
503
504 tm_req = (struct aac_hba_tm_req *)
505 fib->hw_fib_va;
506 if (tm_req->iu_type ==
507 HBA_IU_TYPE_SCSI_TM_REQ) {
508 ((struct aac_hba_tm_req *)
509 fib->hw_fib_va)->reply_qid
510 = vector_no;
511 ((struct aac_hba_tm_req *)
512 fib->hw_fib_va)->request_id
513 += (vector_no << 16);
514 } else {
515 ((struct aac_hba_reset_req *)
516 fib->hw_fib_va)->reply_qid
517 = vector_no;
518 ((struct aac_hba_reset_req *)
519 fib->hw_fib_va)->request_id
520 += (vector_no << 16);
521 }
522 } else {
523 ((struct aac_hba_cmd_req *)
524 fib->hw_fib_va)->reply_qid
525 = vector_no;
526 ((struct aac_hba_cmd_req *)
527 fib->hw_fib_va)->request_id
528 += (vector_no << 16);
529 }
530 } else {
531 fib->hw_fib_va->header.Handle += (vector_no << 16);
532 }
464 } else { 533 } else {
465 vector_no = 0; 534 vector_no = 0;
466 } 535 }
467 536
468 atomic_inc(&dev->rrq_outstanding[vector_no]); 537 atomic_inc(&dev->rrq_outstanding[vector_no]);
469 538
470 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { 539 if (native_hba) {
471 /* Calculate the amount to the fibsize bits */
472 fibsize = (hdr_size + 127) / 128 - 1;
473 if (fibsize > (ALIGN32 - 1))
474 return -EMSGSIZE;
475 /* New FIB header, 32-bit */
476 address = fib->hw_fib_pa; 540 address = fib->hw_fib_pa;
477 fib->hw_fib_va->header.StructType = FIB_MAGIC2; 541 fibsize = (fib->hbacmd_size + 127) / 128 - 1;
478 fib->hw_fib_va->header.SenderFibAddress = (u32)address; 542 if (fibsize > 31)
479 fib->hw_fib_va->header.u.TimeStamp = 0; 543 fibsize = 31;
480 BUG_ON(upper_32_bits(address) != 0L);
481 address |= fibsize; 544 address |= fibsize;
545#if defined(writeq)
546 src_writeq(dev, MUnit.IQN_L, (u64)address);
547#else
548 spin_lock_irqsave(&fib->dev->iq_lock, flags);
549 src_writel(dev, MUnit.IQN_H,
550 upper_32_bits(address) & 0xffffffff);
551 src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
552 spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
553#endif
482 } else { 554 } else {
483 /* Calculate the amount to the fibsize bits */ 555 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
484 fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1; 556 dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
485 if (fibsize > (ALIGN32 - 1)) 557 /* Calculate the amount to the fibsize bits */
486 return -EMSGSIZE; 558 fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
487 559 + 127) / 128 - 1;
488 /* Fill XPORT header */ 560 /* New FIB header, 32-bit */
489 pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr); 561 address = fib->hw_fib_pa;
490 pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle); 562 fib->hw_fib_va->header.StructType = FIB_MAGIC2;
491 pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa); 563 fib->hw_fib_va->header.SenderFibAddress =
492 pFibX->Size = cpu_to_le32(hdr_size); 564 cpu_to_le32((u32)address);
493 565 fib->hw_fib_va->header.u.TimeStamp = 0;
494 /* 566 WARN_ON(upper_32_bits(address) != 0L);
495 * The xport header has been 32-byte aligned for us so that fibsize 567 } else {
496 * can be masked out of this address by hardware. -- BenC 568 /* Calculate the amount to the fibsize bits */
497 */ 569 fibsize = (sizeof(struct aac_fib_xporthdr) +
498 address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr); 570 le16_to_cpu(fib->hw_fib_va->header.Size)
499 if (address & (ALIGN32 - 1)) 571 + 127) / 128 - 1;
500 return -EINVAL; 572 /* Fill XPORT header */
573 pFibX = (struct aac_fib_xporthdr *)
574 ((unsigned char *)fib->hw_fib_va -
575 sizeof(struct aac_fib_xporthdr));
576 pFibX->Handle = fib->hw_fib_va->header.Handle;
577 pFibX->HostAddress =
578 cpu_to_le64((u64)fib->hw_fib_pa);
579 pFibX->Size = cpu_to_le32(
580 le16_to_cpu(fib->hw_fib_va->header.Size));
581 address = fib->hw_fib_pa -
582 (u64)sizeof(struct aac_fib_xporthdr);
583 }
584 if (fibsize > 31)
585 fibsize = 31;
501 address |= fibsize; 586 address |= fibsize;
502 } 587
503#if defined(writeq) 588#if defined(writeq)
504 src_writeq(dev, MUnit.IQ_L, (u64)address); 589 src_writeq(dev, MUnit.IQ_L, (u64)address);
505#else 590#else
506 spin_lock_irqsave(&fib->dev->iq_lock, flags); 591 spin_lock_irqsave(&fib->dev->iq_lock, flags);
507 src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff); 592 src_writel(dev, MUnit.IQ_H,
508 src_writel(dev, MUnit.IQ_L, address & 0xffffffff); 593 upper_32_bits(address) & 0xffffffff);
509 spin_unlock_irqrestore(&fib->dev->iq_lock, flags); 594 src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
595 spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
510#endif 596#endif
597 }
511 return 0; 598 return 0;
512} 599}
513 600
@@ -553,52 +640,117 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
553 dev->base = dev->regs.src.bar0 = NULL; 640 dev->base = dev->regs.src.bar0 = NULL;
554 return 0; 641 return 0;
555 } 642 }
643
644 dev->regs.src.bar1 =
645 ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
646 dev->base = NULL;
647 if (dev->regs.src.bar1 == NULL)
648 return -1;
556 dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size); 649 dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
557 if (dev->base == NULL) 650 if (dev->base == NULL) {
651 iounmap(dev->regs.src.bar1);
652 dev->regs.src.bar1 = NULL;
558 return -1; 653 return -1;
654 }
559 dev->IndexRegs = &((struct src_registers __iomem *) 655 dev->IndexRegs = &((struct src_registers __iomem *)
560 dev->base)->u.denali.IndexRegs; 656 dev->base)->u.denali.IndexRegs;
561 return 0; 657 return 0;
562} 658}
563 659
564static int aac_src_restart_adapter(struct aac_dev *dev, int bled) 660static void aac_set_intx_mode(struct aac_dev *dev)
661{
662 if (dev->msi_enabled) {
663 aac_src_access_devreg(dev, AAC_ENABLE_INTX);
664 dev->msi_enabled = 0;
665 msleep(5000); /* Delay 5 seconds */
666 }
667}
668
669static void aac_send_iop_reset(struct aac_dev *dev, int bled)
565{ 670{
566 u32 var, reset_mask; 671 u32 var, reset_mask;
567 672
568 if (bled >= 0) { 673 bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
569 if (bled) 674 0, 0, 0, 0, 0, 0, &var,
570 printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", 675 &reset_mask, NULL, NULL, NULL);
676
677 if ((bled || var != 0x00000001) && !dev->doorbell_mask)
678 bled = -EINVAL;
679 else if (dev->doorbell_mask) {
680 reset_mask = dev->doorbell_mask;
681 bled = 0;
682 var = 0x00000001;
683 }
684
685 aac_set_intx_mode(dev);
686
687 if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
688 AAC_OPTION_DOORBELL_RESET)) {
689 src_writel(dev, MUnit.IDR, reset_mask);
690 } else {
691 src_writel(dev, MUnit.IDR, 0x100);
692 }
693 msleep(30000);
694}
695
696static void aac_send_hardware_soft_reset(struct aac_dev *dev)
697{
698 u_int32_t val;
699
700 val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
701 val |= 0x01;
702 writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
703 msleep_interruptible(20000);
704}
705
706static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
707{
708 unsigned long status, start;
709
710 if (bled < 0)
711 goto invalid_out;
712
713 if (bled)
714 pr_err("%s%d: adapter kernel panic'd %x.\n",
571 dev->name, dev->id, bled); 715 dev->name, dev->id, bled);
572 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
573 bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
574 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
575 if ((bled || (var != 0x00000001)) &&
576 !dev->doorbell_mask)
577 return -EINVAL;
578 else if (dev->doorbell_mask) {
579 reset_mask = dev->doorbell_mask;
580 bled = 0;
581 var = 0x00000001;
582 }
583 716
584 if ((dev->pdev->device == PMC_DEVICE_S7 || 717 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
585 dev->pdev->device == PMC_DEVICE_S8 ||
586 dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) {
587 aac_src_access_devreg(dev, AAC_ENABLE_INTX);
588 dev->msi_enabled = 0;
589 msleep(5000); /* Delay 5 seconds */
590 }
591 718
592 if (!bled && (dev->supplement_adapter_info.SupportedOptions2 & 719 switch (reset_type) {
593 AAC_OPTION_DOORBELL_RESET)) { 720 case IOP_HWSOFT_RESET:
594 src_writel(dev, MUnit.IDR, reset_mask); 721 aac_send_iop_reset(dev, bled);
595 ssleep(45); 722 /*
596 } else { 723 * Check to see if KERNEL_UP_AND_RUNNING
597 src_writel(dev, MUnit.IDR, 0x100); 724 * Wait for the adapter to be up and running.
598 ssleep(45); 725 * If !KERNEL_UP_AND_RUNNING issue HW Soft Reset
726 */
727 status = src_readl(dev, MUnit.OMR);
728 if (dev->sa_firmware
729 && !(status & KERNEL_UP_AND_RUNNING)) {
730 start = jiffies;
731 do {
732 status = src_readl(dev, MUnit.OMR);
733 if (time_after(jiffies,
734 start+HZ*SOFT_RESET_TIME)) {
735 aac_send_hardware_soft_reset(dev);
736 start = jiffies;
737 }
738 } while (!(status & KERNEL_UP_AND_RUNNING));
599 } 739 }
740 break;
741 case HW_SOFT_RESET:
742 if (dev->sa_firmware) {
743 aac_send_hardware_soft_reset(dev);
744 aac_set_intx_mode(dev);
745 }
746 break;
747 default:
748 aac_send_iop_reset(dev, bled);
749 break;
600 } 750 }
601 751
752invalid_out:
753
602 if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) 754 if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
603 return -ENODEV; 755 return -ENODEV;
604 756
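The new up-and-running wait in the IOP_HWSOFT_RESET branch re-issues a hardware soft reset every SOFT_RESET_TIME seconds until OMR reports KERNEL_UP_AND_RUNNING; time_after() keeps the deadline test correct across a jiffies wraparound. The idiom in isolation:

	unsigned long start = jiffies;

	while (!(src_readl(dev, MUnit.OMR) & KERNEL_UP_AND_RUNNING)) {
		if (time_after(jiffies, start + HZ * SOFT_RESET_TIME)) {
			aac_send_hardware_soft_reset(dev);
			start = jiffies;	/* rearm the deadline */
		}
	}

Note the loop as written, here and in the hunk above, has no overall bound, so firmware that never comes up pins the thread.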
@@ -653,14 +805,15 @@ int aac_src_init(struct aac_dev *dev)
653 dev->a_ops.adapter_sync_cmd = src_sync_cmd; 805 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
654 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; 806 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
655 if ((aac_reset_devices || reset_devices) && 807 if ((aac_reset_devices || reset_devices) &&
656 !aac_src_restart_adapter(dev, 0)) 808 !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
657 ++restart; 809 ++restart;
658 /* 810 /*
659 * Check to see if the board panic'd while booting. 811 * Check to see if the board panic'd while booting.
660 */ 812 */
661 status = src_readl(dev, MUnit.OMR); 813 status = src_readl(dev, MUnit.OMR);
662 if (status & KERNEL_PANIC) { 814 if (status & KERNEL_PANIC) {
663 if (aac_src_restart_adapter(dev, aac_src_check_health(dev))) 815 if (aac_src_restart_adapter(dev,
816 aac_src_check_health(dev), IOP_HWSOFT_RESET))
664 goto error_iounmap; 817 goto error_iounmap;
665 ++restart; 818 ++restart;
666 } 819 }
@@ -701,7 +854,7 @@ int aac_src_init(struct aac_dev *dev)
701 ? (startup_timeout - 60) 854 ? (startup_timeout - 60)
702 : (startup_timeout / 2))))) { 855 : (startup_timeout / 2))))) {
703 if (likely(!aac_src_restart_adapter(dev, 856 if (likely(!aac_src_restart_adapter(dev,
704 aac_src_check_health(dev)))) 857 aac_src_check_health(dev), IOP_HWSOFT_RESET)))
705 start = jiffies; 858 start = jiffies;
706 ++restart; 859 ++restart;
707 } 860 }
@@ -798,7 +951,7 @@ int aac_srcv_init(struct aac_dev *dev)
798 dev->a_ops.adapter_sync_cmd = src_sync_cmd; 951 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
799 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; 952 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
800 if ((aac_reset_devices || reset_devices) && 953 if ((aac_reset_devices || reset_devices) &&
801 !aac_src_restart_adapter(dev, 0)) 954 !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
802 ++restart; 955 ++restart;
803 /* 956 /*
804 * Check to see if flash update is running. 957 * Check to see if flash update is running.
@@ -827,7 +980,8 @@ int aac_srcv_init(struct aac_dev *dev)
827 */ 980 */
828 status = src_readl(dev, MUnit.OMR); 981 status = src_readl(dev, MUnit.OMR);
829 if (status & KERNEL_PANIC) { 982 if (status & KERNEL_PANIC) {
830 if (aac_src_restart_adapter(dev, aac_src_check_health(dev))) 983 if (aac_src_restart_adapter(dev,
984 aac_src_check_health(dev), IOP_HWSOFT_RESET))
831 goto error_iounmap; 985 goto error_iounmap;
832 ++restart; 986 ++restart;
833 } 987 }
@@ -866,7 +1020,8 @@ int aac_srcv_init(struct aac_dev *dev)
866 ((startup_timeout > 60) 1020 ((startup_timeout > 60)
867 ? (startup_timeout - 60) 1021 ? (startup_timeout - 60)
868 : (startup_timeout / 2))))) { 1022 : (startup_timeout / 2))))) {
869 if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev)))) 1023 if (likely(!aac_src_restart_adapter(dev,
1024 aac_src_check_health(dev), IOP_HWSOFT_RESET)))
870 start = jiffies; 1025 start = jiffies;
871 ++restart; 1026 ++restart;
872 } 1027 }
@@ -897,7 +1052,8 @@ int aac_srcv_init(struct aac_dev *dev)
897 1052
898 if (aac_init_adapter(dev) == NULL) 1053 if (aac_init_adapter(dev) == NULL)
899 goto error_iounmap; 1054 goto error_iounmap;
900 if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) 1055 if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
1056 (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
901 goto error_iounmap; 1057 goto error_iounmap;
902 if (dev->msi_enabled) 1058 if (dev->msi_enabled)
903 aac_src_access_devreg(dev, AAC_ENABLE_MSIX); 1059 aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
@@ -905,9 +1061,9 @@ int aac_srcv_init(struct aac_dev *dev)
905 if (aac_acquire_irq(dev)) 1061 if (aac_acquire_irq(dev))
906 goto error_iounmap; 1062 goto error_iounmap;
907 1063
908 dev->dbg_base = dev->base_start; 1064 dev->dbg_base = pci_resource_start(dev->pdev, 2);
909 dev->dbg_base_mapped = dev->base; 1065 dev->dbg_base_mapped = dev->regs.src.bar1;
910 dev->dbg_size = dev->base_size; 1066 dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
911 dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; 1067 dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
912 1068
913 aac_adapter_enable_int(dev); 1069 aac_adapter_enable_int(dev);
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 105b35393ce9..f792420c533e 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -178,37 +178,6 @@ static int scsi_dma_is_ignored_buserr(unsigned char dma_stat)
178} 178}
179 179
180 180
181#if 0
182/* Dead code... wasn't called anyway :-) and causes some trouble, because at
183 * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has
184 * to clear the DMA int pending bit before it allows other level 6 interrupts.
185 */
186static void scsi_dma_buserr(int irq, void *dummy)
187{
188 unsigned char dma_stat = tt_scsi_dma.dma_ctrl;
189
190 /* Don't do anything if a NCR interrupt is pending. Probably it's just
191 * masked... */
192 if (atari_irq_pending(IRQ_TT_MFP_SCSI))
193 return;
194
195 printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n",
196 SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt));
197 if (dma_stat & 0x80) {
198 if (!scsi_dma_is_ignored_buserr(dma_stat))
199 printk("SCSI DMA bus error -- bad DMA programming!\n");
200 } else {
201 /* Under normal circumstances we never should get to this point,
202 * since both interrupts are triggered simultaneously and the 5380
203 * int has higher priority. When this irq is handled, that DMA
204 * interrupt is cleared. So a warning message is printed here.
205 */
206 printk("SCSI DMA intr ?? -- this shouldn't happen!\n");
207 }
208}
209#endif
210
211
212static irqreturn_t scsi_tt_intr(int irq, void *dev) 181static irqreturn_t scsi_tt_intr(int irq, void *dev)
213{ 182{
214 struct Scsi_Host *instance = dev; 183 struct Scsi_Host *instance = dev;
@@ -713,7 +682,8 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
713 if (IS_A_TT()) { 682 if (IS_A_TT()) {
714 tt_scsi_dma.dma_ctrl = 0; 683 tt_scsi_dma.dma_ctrl = 0;
715 } else { 684 } else {
716 st_dma.dma_mode_status = 0x90; 685 if (stdma_is_locked_by(scsi_falcon_intr))
686 st_dma.dma_mode_status = 0x90;
717 atari_dma_active = 0; 687 atari_dma_active = 0;
718 atari_dma_orig_addr = NULL; 688 atari_dma_orig_addr = NULL;
719 } 689 }
@@ -813,7 +783,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
813 return -ENOMEM; 783 return -ENOMEM;
814 } 784 }
815 atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); 785 atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
816 atari_dma_orig_addr = 0; 786 atari_dma_orig_addr = NULL;
817 } 787 }
818 788
819 instance = scsi_host_alloc(&atari_scsi_template, 789 instance = scsi_host_alloc(&atari_scsi_template,
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index b1d0fdc5d5e1..ca9440fb2325 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -84,7 +84,6 @@ static inline void queue_tail_inc(struct be_queue_info *q)
84/*ISCSI */ 84/*ISCSI */
85 85
86struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ 86struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
87 bool enable;
88 u32 min_eqd; /* in usecs */ 87 u32 min_eqd; /* in usecs */
89 u32 max_eqd; /* in usecs */ 88 u32 max_eqd; /* in usecs */
90 u32 prev_eqd; /* in usecs */ 89 u32 prev_eqd; /* in usecs */
@@ -94,8 +93,6 @@ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
94}; 93};
95 94
96struct be_eq_obj { 95struct be_eq_obj {
97 bool todo_mcc_cq;
98 bool todo_cq;
99 u32 cq_count; 96 u32 cq_count;
100 struct be_queue_info q; 97 struct be_queue_info q;
101 struct beiscsi_hba *phba; 98 struct beiscsi_hba *phba;
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index be65da2988fb..5d59e2630ce6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -676,10 +676,10 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
676 bool embedded, u8 sge_cnt) 676 bool embedded, u8 sge_cnt)
677{ 677{
678 if (embedded) 678 if (embedded)
679 wrb->embedded |= MCC_WRB_EMBEDDED_MASK; 679 wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
680 else 680 else
681 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) << 681 wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
682 MCC_WRB_SGE_CNT_SHIFT; 682 MCC_WRB_SGE_CNT_SHIFT;
683 wrb->payload_length = payload_len; 683 wrb->payload_length = payload_len;
684 be_dws_cpu_to_le(wrb, 8); 684 be_dws_cpu_to_le(wrb, 8);
685} 685}
@@ -1599,7 +1599,7 @@ int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
1599{ 1599{
1600 struct be_ctrl_info *ctrl = &phba->ctrl; 1600 struct be_ctrl_info *ctrl = &phba->ctrl;
1601 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 1601 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1602 struct be_post_sgl_pages_req *req = embedded_payload(wrb); 1602 struct be_post_sgl_pages_req *req;
1603 int status; 1603 int status;
1604 1604
1605 mutex_lock(&ctrl->mbox_lock); 1605 mutex_lock(&ctrl->mbox_lock);
@@ -1700,31 +1700,34 @@ int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
1700 struct be_ctrl_info *ctrl = &phba->ctrl; 1700 struct be_ctrl_info *ctrl = &phba->ctrl;
1701 struct iscsi_cleanup_req_v1 *req_v1; 1701 struct iscsi_cleanup_req_v1 *req_v1;
1702 struct iscsi_cleanup_req *req; 1702 struct iscsi_cleanup_req *req;
1703 u16 hdr_ring_id, data_ring_id;
1703 struct be_mcc_wrb *wrb; 1704 struct be_mcc_wrb *wrb;
1704 int status; 1705 int status;
1705 1706
1706 mutex_lock(&ctrl->mbox_lock); 1707 mutex_lock(&ctrl->mbox_lock);
1707 wrb = wrb_from_mbox(&ctrl->mbox_mem); 1708 wrb = wrb_from_mbox(&ctrl->mbox_mem);
1708 req = embedded_payload(wrb);
1709 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1710 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1711 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
1712 1709
1713 /** 1710 hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
1714 * TODO: Check with FW folks the chute value to be set. 1711 data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
1715 * For now, use the ULP_MASK as the chute value.
1716 */
1717 if (is_chip_be2_be3r(phba)) { 1712 if (is_chip_be2_be3r(phba)) {
1713 req = embedded_payload(wrb);
1714 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1715 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1716 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
1718 req->chute = (1 << ulp); 1717 req->chute = (1 << ulp);
1719 req->hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp); 1718 /* BE2/BE3 FW creates 8-bit ring id */
1720 req->data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp); 1719 req->hdr_ring_id = hdr_ring_id;
1720 req->data_ring_id = data_ring_id;
1721 } else { 1721 } else {
1722 req_v1 = (struct iscsi_cleanup_req_v1 *)req; 1722 req_v1 = embedded_payload(wrb);
1723 be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
1724 be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
1725 OPCODE_COMMON_ISCSI_CLEANUP,
1726 sizeof(*req_v1));
1723 req_v1->hdr.version = 1; 1727 req_v1->hdr.version = 1;
1724 req_v1->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, 1728 req_v1->chute = (1 << ulp);
1725 ulp)); 1729 req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
1726 req_v1->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, 1730 req_v1->data_ring_id = cpu_to_le16(data_ring_id);
1727 ulp));
1728 } 1731 }
1729 1732
1730 status = be_mbox_notify(ctrl); 1733 status = be_mbox_notify(ctrl);
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 328fb5b973cd..1d40e83b0790 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -31,10 +31,16 @@ struct be_sge {
31 __le32 len; 31 __le32 len;
32}; 32};
33 33
34#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
35#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
36struct be_mcc_wrb { 34struct be_mcc_wrb {
37 u32 embedded; /* dword 0 */ 35 u32 emb_sgecnt_special; /* dword 0 */
36 /* bits 0 - embedded */
37 /* bits 1 - 2 reserved */
38 /* bits 3 - 7 sge count */
39 /* bits 8 - 23 reserved */
40 /* bits 24 - 31 special */
41#define MCC_WRB_EMBEDDED_MASK 1
42#define MCC_WRB_SGE_CNT_SHIFT 3
43#define MCC_WRB_SGE_CNT_MASK 0x1F
38 u32 payload_length; /* dword 1 */ 44 u32 payload_length; /* dword 1 */
39 u32 tag0; /* dword 2 */ 45 u32 tag0; /* dword 2 */
40 u32 tag1; /* dword 3 */ 46 u32 tag1; /* dword 3 */
@@ -1133,11 +1139,6 @@ struct tcp_connect_and_offload_out {
1133 1139
1134} __packed; 1140} __packed;
1135 1141
1136struct be_mcc_wrb_context {
1137 struct MCC_WRB *wrb;
1138 int *users_final_status;
1139} __packed;
1140
1141#define DB_DEF_PDU_RING_ID_MASK 0x3FFF /* bits 0 - 13 */ 1142#define DB_DEF_PDU_RING_ID_MASK 0x3FFF /* bits 0 - 13 */
1142#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 16 - 29 */ 1143#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 16 - 29 */
1143#define DB_DEF_PDU_REARM_SHIFT 14 1144#define DB_DEF_PDU_REARM_SHIFT 14
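With the masks now documented next to the renamed emb_sgecnt_special field, dword 0 packs the embedded flag (bit 0), the SGE count (bits 3-7) and a "special" byte (bits 24-31). How be_wrb_hdr_prepare() fills the two cases, per that layout (the SGE count of 2 is illustrative):

	/* embedded payload: */
	wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;		/* bit 0 */

	/* non-embedded, e.g. two scatter-gather entries: */
	wrb->emb_sgecnt_special |= (2 & MCC_WRB_SGE_CNT_MASK)
					<< MCC_WRB_SGE_CNT_SHIFT;	/* bits 3-7 */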
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index ba258217614e..a4844578e357 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -166,33 +166,6 @@ beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
166} 166}
167 167
168/** 168/**
169 * beiscsi_bindconn_cid - Bind the beiscsi_conn with phba connection table
170 * @beiscsi_conn: The pointer to beiscsi_conn structure
171 * @phba: The phba instance
172 * @cid: The cid to free
173 */
174static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
175 struct beiscsi_conn *beiscsi_conn,
176 unsigned int cid)
177{
178 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
179
180 if (phba->conn_table[cri_index]) {
181 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
182 "BS_%d : Connection table already occupied. Detected clash\n");
183
184 return -EINVAL;
185 } else {
186 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
187 "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
188 cri_index, beiscsi_conn);
189
190 phba->conn_table[cri_index] = beiscsi_conn;
191 }
192 return 0;
193}
194
195/**
196 * beiscsi_conn_bind - Binds iscsi session/connection with TCP connection 169 * beiscsi_conn_bind - Binds iscsi session/connection with TCP connection
197 * @cls_session: pointer to iscsi cls session 170 * @cls_session: pointer to iscsi cls session
198 * @cls_conn: pointer to iscsi cls conn 171 * @cls_conn: pointer to iscsi cls conn
@@ -212,6 +185,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
212 struct hwi_wrb_context *pwrb_context; 185 struct hwi_wrb_context *pwrb_context;
213 struct beiscsi_endpoint *beiscsi_ep; 186 struct beiscsi_endpoint *beiscsi_ep;
214 struct iscsi_endpoint *ep; 187 struct iscsi_endpoint *ep;
188 uint16_t cri_index;
215 189
216 ep = iscsi_lookup_endpoint(transport_fd); 190 ep = iscsi_lookup_endpoint(transport_fd);
217 if (!ep) 191 if (!ep)
@@ -229,20 +203,34 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
229 203
230 return -EEXIST; 204 return -EEXIST;
231 } 205 }
232 206 cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
233 pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID( 207 if (phba->conn_table[cri_index]) {
234 beiscsi_ep->ep_cid)]; 208 if (beiscsi_conn != phba->conn_table[cri_index] ||
209 beiscsi_ep != phba->conn_table[cri_index]->ep) {
210 __beiscsi_log(phba, KERN_ERR,
211 "BS_%d : conn_table not empty at %u: cid %u conn %p:%p\n",
212 cri_index,
213 beiscsi_ep->ep_cid,
214 beiscsi_conn,
215 phba->conn_table[cri_index]);
216 return -EINVAL;
217 }
218 }
235 219
236 beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid; 220 beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
237 beiscsi_conn->ep = beiscsi_ep; 221 beiscsi_conn->ep = beiscsi_ep;
238 beiscsi_ep->conn = beiscsi_conn; 222 beiscsi_ep->conn = beiscsi_conn;
223 /**
224 * Each connection is associated with a WRBQ kept in wrb_context.
225 * Store doorbell offset for transmit path.
226 */
227 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
239 beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset; 228 beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
240
241 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 229 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
242 "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n", 230 "BS_%d : cid %d phba->conn_table[%u]=%p\n",
243 beiscsi_conn, conn, beiscsi_ep->ep_cid); 231 beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
244 232 phba->conn_table[cri_index] = beiscsi_conn;
245 return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid); 233 return 0;
246} 234}
247 235
248static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba) 236static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
@@ -973,9 +961,9 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
973 */ 961 */
974static int beiscsi_get_cid(struct beiscsi_hba *phba) 962static int beiscsi_get_cid(struct beiscsi_hba *phba)
975{ 963{
976 unsigned short cid = 0xFFFF, cid_from_ulp;
977 struct ulp_cid_info *cid_info = NULL;
978 uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1; 964 uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
965 unsigned short cid, cid_from_ulp;
966 struct ulp_cid_info *cid_info;
979 967
980 /* Find the ULP which has more CID available */ 968 /* Find the ULP which has more CID available */
981 cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ? 969 cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
@@ -984,20 +972,27 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
984 BEISCSI_ULP1_AVLBL_CID(phba) : 0; 972 BEISCSI_ULP1_AVLBL_CID(phba) : 0;
985 cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ? 973 cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
986 BEISCSI_ULP0 : BEISCSI_ULP1; 974 BEISCSI_ULP0 : BEISCSI_ULP1;
987 975 /**
 988 if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) { 976 * If iSCSI is loaded only on ULP 0 and cid_avlbl_ulp is zero for
 989 cid_info = phba->cid_array_info[cid_from_ulp]; 977 * both ULPs, the comparison above still selects ULP 1.
 990 if (!cid_info->avlbl_cids) 978 * Check that the ULP is loaded before getting a new CID.
991 return cid; 979 */
992 980 if (!test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported))
993 cid = cid_info->cid_array[cid_info->cid_alloc++]; 981 return BE_INVALID_CID;
994 982
995 if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT( 983 cid_info = phba->cid_array_info[cid_from_ulp];
996 phba, cid_from_ulp)) 984 cid = cid_info->cid_array[cid_info->cid_alloc];
997 cid_info->cid_alloc = 0; 985 if (!cid_info->avlbl_cids || cid == BE_INVALID_CID) {
998 986 __beiscsi_log(phba, KERN_ERR,
999 cid_info->avlbl_cids--; 987 "BS_%d : failed to get cid: available %u:%u\n",
988 cid_info->avlbl_cids, cid_info->cid_free);
989 return BE_INVALID_CID;
1000 } 990 }
991 /* empty the slot */
992 cid_info->cid_array[cid_info->cid_alloc++] = BE_INVALID_CID;
993 if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(phba, cid_from_ulp))
994 cid_info->cid_alloc = 0;
995 cid_info->avlbl_cids--;
1001 return cid; 996 return cid;
1002} 997}
1003 998
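The reworked allocator treats cid_array as a ring with separate alloc and free
cursors and uses BE_INVALID_CID as an empty-slot sentinel, so a cursor mismatch
or double free becomes detectable instead of silently recycling a CID. A
self-contained sketch of the same ring discipline (types simplified, locking
and logging omitted):

	#define BE_INVALID_CID	0xffff

	struct cid_ring {
		unsigned short *slot;	/* BE_INVALID_CID marks an empty slot */
		unsigned short count;	/* ring size */
		unsigned short alloc;	/* next slot to allocate from */
		unsigned short free;	/* next slot to release into */
		unsigned short avail;	/* CIDs currently held in the ring */
	};

	static unsigned short cid_ring_get(struct cid_ring *r)
	{
		unsigned short cid = r->slot[r->alloc];

		if (!r->avail || cid == BE_INVALID_CID)
			return BE_INVALID_CID;	/* exhausted or corrupted */
		r->slot[r->alloc] = BE_INVALID_CID;	/* empty the slot */
		if (++r->alloc == r->count)
			r->alloc = 0;
		r->avail--;
		return cid;
	}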
@@ -1008,22 +1003,28 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
1008 */ 1003 */
1009static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) 1004static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
1010{ 1005{
1011 uint16_t cid_post_ulp;
1012 struct hwi_controller *phwi_ctrlr;
1013 struct hwi_wrb_context *pwrb_context;
1014 struct ulp_cid_info *cid_info = NULL;
1015 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); 1006 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
1007 struct hwi_wrb_context *pwrb_context;
1008 struct hwi_controller *phwi_ctrlr;
1009 struct ulp_cid_info *cid_info;
1010 uint16_t cid_post_ulp;
1016 1011
1017 phwi_ctrlr = phba->phwi_ctrlr; 1012 phwi_ctrlr = phba->phwi_ctrlr;
1018 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1013 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1019 cid_post_ulp = pwrb_context->ulp_num; 1014 cid_post_ulp = pwrb_context->ulp_num;
1020 1015
1021 cid_info = phba->cid_array_info[cid_post_ulp]; 1016 cid_info = phba->cid_array_info[cid_post_ulp];
 1022 cid_info->avlbl_cids++; 1017 /* only fill an empty slot */
1023 1018 if (cid_info->cid_array[cid_info->cid_free] != BE_INVALID_CID) {
1019 __beiscsi_log(phba, KERN_ERR,
1020 "BS_%d : failed to put cid %u: available %u:%u\n",
1021 cid, cid_info->avlbl_cids, cid_info->cid_free);
1022 return;
1023 }
1024 cid_info->cid_array[cid_info->cid_free++] = cid; 1024 cid_info->cid_array[cid_info->cid_free++] = cid;
1025 if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp)) 1025 if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
1026 cid_info->cid_free = 0; 1026 cid_info->cid_free = 0;
1027 cid_info->avlbl_cids++;
1027} 1028}
1028 1029
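The release side mirrors the sketch above: a CID may only be written back into
an empty slot, so an errant double put is dropped (and, in the driver, logged)
rather than corrupting the ring:

	static void cid_ring_put(struct cid_ring *r, unsigned short cid)
	{
		if (r->slot[r->free] != BE_INVALID_CID)
			return;		/* double free or cursor mismatch */
		r->slot[r->free] = cid;
		if (++r->free == r->count)
			r->free = 0;
		r->avail++;
	}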
1029/** 1030/**
@@ -1037,8 +1038,8 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
1037 1038
1038 beiscsi_put_cid(phba, beiscsi_ep->ep_cid); 1039 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
1039 beiscsi_ep->phba = NULL; 1040 beiscsi_ep->phba = NULL;
1040 phba->ep_array[BE_GET_CRI_FROM_CID 1041 /* clear this to track freeing in beiscsi_ep_disconnect */
1041 (beiscsi_ep->ep_cid)] = NULL; 1042 phba->ep_array[BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid)] = NULL;
1042 1043
1043 /** 1044 /**
1044 * Check if any connection resource allocated by driver 1045 * Check if any connection resource allocated by driver
@@ -1049,6 +1050,11 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
1049 return; 1050 return;
1050 1051
1051 beiscsi_conn = beiscsi_ep->conn; 1052 beiscsi_conn = beiscsi_ep->conn;
1053 /**
1054 * Break ep->conn link here so that completions after
1055 * this are ignored.
1056 */
1057 beiscsi_ep->conn = NULL;
1052 if (beiscsi_conn->login_in_progress) { 1058 if (beiscsi_conn->login_in_progress) {
1053 beiscsi_free_mgmt_task_handles(beiscsi_conn, 1059 beiscsi_free_mgmt_task_handles(beiscsi_conn,
1054 beiscsi_conn->task); 1060 beiscsi_conn->task);
@@ -1079,7 +1085,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1079 "BS_%d : In beiscsi_open_conn\n"); 1085 "BS_%d : In beiscsi_open_conn\n");
1080 1086
1081 beiscsi_ep->ep_cid = beiscsi_get_cid(phba); 1087 beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
1082 if (beiscsi_ep->ep_cid == 0xFFFF) { 1088 if (beiscsi_ep->ep_cid == BE_INVALID_CID) {
1083 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 1089 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1084 "BS_%d : No free cid available\n"); 1090 "BS_%d : No free cid available\n");
1085 return ret; 1091 return ret;
@@ -1114,7 +1120,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1114 nonemb_cmd.size = req_memsize; 1120 nonemb_cmd.size = req_memsize;
1115 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 1121 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
1116 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); 1122 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
1117 if (tag <= 0) { 1123 if (!tag) {
1118 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 1124 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1119 "BS_%d : mgmt_open_connection Failed for cid=%d\n", 1125 "BS_%d : mgmt_open_connection Failed for cid=%d\n",
1120 beiscsi_ep->ep_cid); 1126 beiscsi_ep->ep_cid);
@@ -1285,26 +1291,6 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
1285} 1291}
1286 1292
1287/** 1293/**
1288 * beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table
1289 * @phba: The phba instance
1290 * @cid: The cid to free
1291 */
1292static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
1293 unsigned int cid)
1294{
1295 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
1296
1297 if (phba->conn_table[cri_index])
1298 phba->conn_table[cri_index] = NULL;
1299 else {
1300 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1301 "BS_%d : Connection table Not occupied.\n");
1302 return -EINVAL;
1303 }
1304 return 0;
1305}
1306
1307/**
1308 * beiscsi_ep_disconnect - Tears down the TCP connection 1294 * beiscsi_ep_disconnect - Tears down the TCP connection
1309 * @ep: endpoint to be used 1295 * @ep: endpoint to be used
1310 * 1296 *
@@ -1318,13 +1304,23 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
1318 unsigned int tag; 1304 unsigned int tag;
1319 uint8_t mgmt_invalidate_flag, tcp_upload_flag; 1305 uint8_t mgmt_invalidate_flag, tcp_upload_flag;
1320 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH; 1306 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
1307 uint16_t cri_index;
1321 1308
1322 beiscsi_ep = ep->dd_data; 1309 beiscsi_ep = ep->dd_data;
1323 phba = beiscsi_ep->phba; 1310 phba = beiscsi_ep->phba;
1324 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1311 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1325 "BS_%d : In beiscsi_ep_disconnect for ep_cid = %d\n", 1312 "BS_%d : In beiscsi_ep_disconnect for ep_cid = %u\n",
1326 beiscsi_ep->ep_cid); 1313 beiscsi_ep->ep_cid);
1327 1314
1315 cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
1316 if (!phba->ep_array[cri_index]) {
1317 __beiscsi_log(phba, KERN_ERR,
1318 "BS_%d : ep_array at %u cid %u empty\n",
1319 cri_index,
1320 beiscsi_ep->ep_cid);
1321 return;
1322 }
1323
1328 if (beiscsi_ep->conn) { 1324 if (beiscsi_ep->conn) {
1329 beiscsi_conn = beiscsi_ep->conn; 1325 beiscsi_conn = beiscsi_ep->conn;
1330 iscsi_suspend_queue(beiscsi_conn->conn); 1326 iscsi_suspend_queue(beiscsi_conn->conn);
@@ -1356,7 +1352,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
1356free_ep: 1352free_ep:
1357 msleep(BEISCSI_LOGOUT_SYNC_DELAY); 1353 msleep(BEISCSI_LOGOUT_SYNC_DELAY);
1358 beiscsi_free_ep(beiscsi_ep); 1354 beiscsi_free_ep(beiscsi_ep);
1359 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); 1355 if (!phba->conn_table[cri_index])
1356 __beiscsi_log(phba, KERN_ERR,
1357 "BS_%d : conn_table empty at %u: cid %u\n",
1358 cri_index,
1359 beiscsi_ep->ep_cid);
1360 phba->conn_table[cri_index] = NULL;
1360 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); 1361 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
1361} 1362}
1362 1363
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index b5112d6d7e73..32b2713cec93 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -67,8 +67,6 @@ beiscsi_##_name##_disp(struct device *dev,\
67{ \ 67{ \
68 struct Scsi_Host *shost = class_to_shost(dev);\ 68 struct Scsi_Host *shost = class_to_shost(dev);\
69 struct beiscsi_hba *phba = iscsi_host_priv(shost); \ 69 struct beiscsi_hba *phba = iscsi_host_priv(shost); \
70 uint32_t param_val = 0; \
71 param_val = phba->attr_##_name;\
72 return snprintf(buf, PAGE_SIZE, "%d\n",\ 70 return snprintf(buf, PAGE_SIZE, "%d\n",\
73 phba->attr_##_name);\ 71 phba->attr_##_name);\
74} 72}
@@ -218,160 +216,156 @@ static int beiscsi_slave_configure(struct scsi_device *sdev)
218 216
219static int beiscsi_eh_abort(struct scsi_cmnd *sc) 217static int beiscsi_eh_abort(struct scsi_cmnd *sc)
220{ 218{
219 struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
221 struct iscsi_cls_session *cls_session; 220 struct iscsi_cls_session *cls_session;
222 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr; 221 struct beiscsi_io_task *abrt_io_task;
223 struct beiscsi_io_task *aborted_io_task;
224 struct iscsi_conn *conn;
225 struct beiscsi_conn *beiscsi_conn; 222 struct beiscsi_conn *beiscsi_conn;
226 struct beiscsi_hba *phba;
227 struct iscsi_session *session; 223 struct iscsi_session *session;
228 struct invalidate_command_table *inv_tbl; 224 struct invldt_cmd_tbl inv_tbl;
229 struct be_dma_mem nonemb_cmd; 225 struct beiscsi_hba *phba;
230 unsigned int cid, tag, num_invalidate; 226 struct iscsi_conn *conn;
231 int rc; 227 int rc;
232 228
233 cls_session = starget_to_session(scsi_target(sc->device)); 229 cls_session = starget_to_session(scsi_target(sc->device));
234 session = cls_session->dd_data; 230 session = cls_session->dd_data;
235 231
 236 spin_lock_bh(&session->frwd_lock); 232 /* check if we raced; the task may have just been cleaned up under us */
237 if (!aborted_task || !aborted_task->sc) { 233 spin_lock_bh(&session->back_lock);
238 /* we raced */ 234 if (!abrt_task || !abrt_task->sc) {
239 spin_unlock_bh(&session->frwd_lock); 235 spin_unlock_bh(&session->back_lock);
240 return SUCCESS;
241 }
242
243 aborted_io_task = aborted_task->dd_data;
244 if (!aborted_io_task->scsi_cmnd) {
245 /* raced or invalid command */
246 spin_unlock_bh(&session->frwd_lock);
247 return SUCCESS; 236 return SUCCESS;
248 } 237 }
 249 spin_unlock_bh(&session->frwd_lock); 238 /* get a task ref until the FW has processed the req for the ICD used */
250 /* Invalidate WRB Posted for this Task */ 239 __iscsi_get_task(abrt_task);
251 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 240 abrt_io_task = abrt_task->dd_data;
252 aborted_io_task->pwrb_handle->pwrb, 241 conn = abrt_task->conn;
253 1);
254
255 conn = aborted_task->conn;
256 beiscsi_conn = conn->dd_data; 242 beiscsi_conn = conn->dd_data;
257 phba = beiscsi_conn->phba; 243 phba = beiscsi_conn->phba;
 258 244 /* mark as invalid any WRB not yet processed by the FW */
259 /* invalidate iocb */ 245 if (is_chip_be2_be3r(phba)) {
260 cid = beiscsi_conn->beiscsi_conn_cid; 246 AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
261 inv_tbl = phba->inv_tbl; 247 abrt_io_task->pwrb_handle->pwrb, 1);
262 memset(inv_tbl, 0x0, sizeof(*inv_tbl)); 248 } else {
263 inv_tbl->cid = cid; 249 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
264 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; 250 abrt_io_task->pwrb_handle->pwrb, 1);
265 num_invalidate = 1;
266 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
267 sizeof(struct invalidate_commands_params_in),
268 &nonemb_cmd.dma);
269 if (nonemb_cmd.va == NULL) {
270 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
271 "BM_%d : Failed to allocate memory for"
272 "mgmt_invalidate_icds\n");
273 return FAILED;
274 } 251 }
275 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 252 inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
253 inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
254 spin_unlock_bh(&session->back_lock);
276 255
277 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 256 rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
278 cid, &nonemb_cmd); 257 iscsi_put_task(abrt_task);
279 if (!tag) { 258 if (rc) {
280 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 259 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
281 "BM_%d : mgmt_invalidate_icds could not be" 260 "BM_%d : sc %p invalidation failed %d\n",
282 "submitted\n"); 261 sc, rc);
283 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
284 nonemb_cmd.va, nonemb_cmd.dma);
285
286 return FAILED; 262 return FAILED;
287 } 263 }
288 264
289 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
290 if (rc != -EBUSY)
291 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
292 nonemb_cmd.va, nonemb_cmd.dma);
293
294 return iscsi_eh_abort(sc); 265 return iscsi_eh_abort(sc);
295} 266}
296 267
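The reworked abort handler pins the task with a reference before touching its
ICD, marks the WRB invalid while still under back_lock, then issues the now
synchronous invalidate command. Roughly, with error handling trimmed
(mark_wrb_invalid() is a hypothetical stand-in for the chip-specific
AMAP_SET_BITS calls):

	spin_lock_bh(&session->back_lock);
	if (!abrt_task || !abrt_task->sc) {	/* raced with completion */
		spin_unlock_bh(&session->back_lock);
		return SUCCESS;
	}
	__iscsi_get_task(abrt_task);	/* hold the ICD until FW is done */
	mark_wrb_invalid(abrt_io_task->pwrb_handle);
	inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
	spin_unlock_bh(&session->back_lock);

	rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
	iscsi_put_task(abrt_task);
	return rc ? FAILED : iscsi_eh_abort(sc);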
297static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) 268static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
298{ 269{
299 struct iscsi_task *abrt_task; 270 struct beiscsi_invldt_cmd_tbl {
300 struct beiscsi_io_task *abrt_io_task; 271 struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
301 struct iscsi_conn *conn; 272 struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
273 } *inv_tbl;
274 struct iscsi_cls_session *cls_session;
302 struct beiscsi_conn *beiscsi_conn; 275 struct beiscsi_conn *beiscsi_conn;
303 struct beiscsi_hba *phba; 276 struct beiscsi_io_task *io_task;
304 struct iscsi_session *session; 277 struct iscsi_session *session;
305 struct iscsi_cls_session *cls_session; 278 struct beiscsi_hba *phba;
306 struct invalidate_command_table *inv_tbl; 279 struct iscsi_conn *conn;
307 struct be_dma_mem nonemb_cmd; 280 struct iscsi_task *task;
308 unsigned int cid, tag, i, num_invalidate; 281 unsigned int i, nents;
309 int rc; 282 int rc, more = 0;
310 283
311 /* invalidate iocbs */
312 cls_session = starget_to_session(scsi_target(sc->device)); 284 cls_session = starget_to_session(scsi_target(sc->device));
313 session = cls_session->dd_data; 285 session = cls_session->dd_data;
286
314 spin_lock_bh(&session->frwd_lock); 287 spin_lock_bh(&session->frwd_lock);
315 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { 288 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
316 spin_unlock_bh(&session->frwd_lock); 289 spin_unlock_bh(&session->frwd_lock);
317 return FAILED; 290 return FAILED;
318 } 291 }
292
319 conn = session->leadconn; 293 conn = session->leadconn;
320 beiscsi_conn = conn->dd_data; 294 beiscsi_conn = conn->dd_data;
321 phba = beiscsi_conn->phba; 295 phba = beiscsi_conn->phba;
322 cid = beiscsi_conn->beiscsi_conn_cid; 296
323 inv_tbl = phba->inv_tbl; 297 inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
324 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN); 298 if (!inv_tbl) {
325 num_invalidate = 0; 299 spin_unlock_bh(&session->frwd_lock);
300 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
301 "BM_%d : invldt_cmd_tbl alloc failed\n");
302 return FAILED;
303 }
304 nents = 0;
305 /* take back_lock to prevent task from getting cleaned up under us */
306 spin_lock(&session->back_lock);
326 for (i = 0; i < conn->session->cmds_max; i++) { 307 for (i = 0; i < conn->session->cmds_max; i++) {
327 abrt_task = conn->session->cmds[i]; 308 task = conn->session->cmds[i];
328 abrt_io_task = abrt_task->dd_data; 309 if (!task->sc)
329 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
330 continue; 310 continue;
331 311
332 if (sc->device->lun != abrt_task->sc->device->lun) 312 if (sc->device->lun != task->sc->device->lun)
333 continue; 313 continue;
314 /**
315 * Can't fit in more cmds? Normally this won't happen b'coz
316 * BEISCSI_CMD_PER_LUN is same as BE_INVLDT_CMD_TBL_SZ.
317 */
318 if (nents == BE_INVLDT_CMD_TBL_SZ) {
319 more = 1;
320 break;
321 }
334 322
 335 /* Invalidate WRB Posted for this Task */ 323 /* get a task ref until the FW has processed the req for the ICD used */
336 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 324 __iscsi_get_task(task);
337 abrt_io_task->pwrb_handle->pwrb, 325 io_task = task->dd_data;
 338 1); 326 /* mark as invalid any WRB not yet processed by the FW */
327 if (is_chip_be2_be3r(phba)) {
328 AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
329 io_task->pwrb_handle->pwrb, 1);
330 } else {
331 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
332 io_task->pwrb_handle->pwrb, 1);
333 }
339 334
340 inv_tbl->cid = cid; 335 inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
341 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; 336 inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
342 num_invalidate++; 337 inv_tbl->task[nents] = task;
343 inv_tbl++; 338 nents++;
344 } 339 }
340 spin_unlock_bh(&session->back_lock);
345 spin_unlock_bh(&session->frwd_lock); 341 spin_unlock_bh(&session->frwd_lock);
346 inv_tbl = phba->inv_tbl;
347 342
348 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 343 rc = SUCCESS;
349 sizeof(struct invalidate_commands_params_in), 344 if (!nents)
350 &nonemb_cmd.dma); 345 goto end_reset;
351 if (nonemb_cmd.va == NULL) { 346
347 if (more) {
352 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 348 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
353 "BM_%d : Failed to allocate memory for" 349 "BM_%d : number of cmds exceeds size of invalidation table\n");
354 "mgmt_invalidate_icds\n"); 350 rc = FAILED;
355 return FAILED; 351 goto end_reset;
356 } 352 }
357 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 353
358 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 354 if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
359 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
360 cid, &nonemb_cmd);
361 if (!tag) {
362 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 355 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
363 "BM_%d : mgmt_invalidate_icds could not be" 356 "BM_%d : cid %u scmds invalidation failed\n",
364 " submitted\n"); 357 beiscsi_conn->beiscsi_conn_cid);
365 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 358 rc = FAILED;
366 nonemb_cmd.va, nonemb_cmd.dma);
367 return FAILED;
368 } 359 }
369 360
370 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); 361end_reset:
371 if (rc != -EBUSY) 362 for (i = 0; i < nents; i++)
372 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 363 iscsi_put_task(inv_tbl->task[i]);
373 nonemb_cmd.va, nonemb_cmd.dma); 364 kfree(inv_tbl);
374 return iscsi_eh_device_reset(sc); 365
366 if (rc == SUCCESS)
367 rc = iscsi_eh_device_reset(sc);
368 return rc;
375} 369}
376 370
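beiscsi_eh_device_reset() now batches at most BE_INVLDT_CMD_TBL_SZ entries and
keeps a parallel task[] array so the references taken under back_lock can be
dropped once the firmware call returns. The collection loop, condensed
(icd_of() is a hypothetical accessor for psgl_handle->sgl_index):

	nents = 0;
	spin_lock(&session->back_lock);	/* nested inside frwd_lock */
	for (i = 0; i < session->cmds_max; i++) {
		task = session->cmds[i];
		if (!task->sc || task->sc->device->lun != sc->device->lun)
			continue;
		if (nents == BE_INVLDT_CMD_TBL_SZ) {
			more = 1;	/* table full: the reset will fail */
			break;
		}
		__iscsi_get_task(task);	/* keep the ICD alive for FW */
		inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
		inv_tbl->tbl[nents].icd = icd_of(task);
		inv_tbl->task[nents++] = task;
	}
	spin_unlock(&session->back_lock);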
377/*------------------- PCI Driver operations and data ----------------- */ 371/*------------------- PCI Driver operations and data ----------------- */
@@ -395,6 +389,7 @@ static struct scsi_host_template beiscsi_sht = {
395 .change_queue_depth = scsi_change_queue_depth, 389 .change_queue_depth = scsi_change_queue_depth,
396 .slave_configure = beiscsi_slave_configure, 390 .slave_configure = beiscsi_slave_configure,
397 .target_alloc = iscsi_target_alloc, 391 .target_alloc = iscsi_target_alloc,
392 .eh_timed_out = iscsi_eh_cmd_timed_out,
398 .eh_abort_handler = beiscsi_eh_abort, 393 .eh_abort_handler = beiscsi_eh_abort,
399 .eh_device_reset_handler = beiscsi_eh_device_reset, 394 .eh_device_reset_handler = beiscsi_eh_device_reset,
400 .eh_target_reset_handler = iscsi_eh_session_reset, 395 .eh_target_reset_handler = iscsi_eh_session_reset,
@@ -646,7 +641,6 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
646 phba->params.num_sge_per_io = BE2_SGE; 641 phba->params.num_sge_per_io = BE2_SGE;
647 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 642 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
648 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 643 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
649 phba->params.eq_timer = 64;
650 phba->params.num_eq_entries = 1024; 644 phba->params.num_eq_entries = 1024;
651 phba->params.num_cq_entries = 1024; 645 phba->params.num_cq_entries = 1024;
652 phba->params.wrbs_per_cxn = 256; 646 phba->params.wrbs_per_cxn = 256;
@@ -964,6 +958,10 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
964 unsigned long flags; 958 unsigned long flags;
965 959
966 spin_lock_irqsave(&pwrb_context->wrb_lock, flags); 960 spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
961 if (!pwrb_context->wrb_handles_available) {
962 spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
963 return NULL;
964 }
967 pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index]; 965 pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
968 pwrb_context->wrb_handles_available--; 966 pwrb_context->wrb_handles_available--;
969 if (pwrb_context->alloc_index == (wrbs_per_cxn - 1)) 967 if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
@@ -1014,6 +1012,7 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
1014 pwrb_context->free_index = 0; 1012 pwrb_context->free_index = 0;
1015 else 1013 else
1016 pwrb_context->free_index++; 1014 pwrb_context->free_index++;
1015 pwrb_handle->pio_handle = NULL;
1017 spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); 1016 spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
1018} 1017}
1019 1018
@@ -1224,6 +1223,7 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1224 uint16_t wrb_index, cid, cri_index; 1223 uint16_t wrb_index, cid, cri_index;
1225 struct hwi_controller *phwi_ctrlr; 1224 struct hwi_controller *phwi_ctrlr;
1226 struct wrb_handle *pwrb_handle; 1225 struct wrb_handle *pwrb_handle;
1226 struct iscsi_session *session;
1227 struct iscsi_task *task; 1227 struct iscsi_task *task;
1228 1228
1229 phwi_ctrlr = phba->phwi_ctrlr; 1229 phwi_ctrlr = phba->phwi_ctrlr;
@@ -1242,8 +1242,12 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1242 cri_index = BE_GET_CRI_FROM_CID(cid); 1242 cri_index = BE_GET_CRI_FROM_CID(cid);
1243 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1243 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1244 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; 1244 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
1245 session = beiscsi_conn->conn->session;
1246 spin_lock_bh(&session->back_lock);
1245 task = pwrb_handle->pio_handle; 1247 task = pwrb_handle->pio_handle;
1246 iscsi_put_task(task); 1248 if (task)
1249 __iscsi_put_task(task);
1250 spin_unlock_bh(&session->back_lock);
1247} 1251}
1248 1252
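Completion handlers now resolve pio_handle only under session->back_lock and
tolerate a NULL result, which pairs with the put/free paths above (pio_handle
and ep->conn are cleared under the same lock). The pattern:

	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (task)			/* NULL if already aborted/freed */
		__iscsi_put_task(task);	/* back_lock is already held */
	spin_unlock_bh(&session->back_lock);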
1249static void 1253static void
@@ -1323,16 +1327,15 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1323static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, 1327static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1324 struct beiscsi_hba *phba, struct sol_cqe *psol) 1328 struct beiscsi_hba *phba, struct sol_cqe *psol)
1325{ 1329{
1326 struct hwi_wrb_context *pwrb_context;
1327 struct wrb_handle *pwrb_handle;
1328 struct iscsi_wrb *pwrb = NULL;
1329 struct hwi_controller *phwi_ctrlr;
1330 struct iscsi_task *task;
1331 unsigned int type;
1332 struct iscsi_conn *conn = beiscsi_conn->conn; 1330 struct iscsi_conn *conn = beiscsi_conn->conn;
1333 struct iscsi_session *session = conn->session; 1331 struct iscsi_session *session = conn->session;
1334 struct common_sol_cqe csol_cqe = {0}; 1332 struct common_sol_cqe csol_cqe = {0};
1333 struct hwi_wrb_context *pwrb_context;
1334 struct hwi_controller *phwi_ctrlr;
1335 struct wrb_handle *pwrb_handle;
1336 struct iscsi_task *task;
1335 uint16_t cri_index = 0; 1337 uint16_t cri_index = 0;
1338 uint8_t type;
1336 1339
1337 phwi_ctrlr = phba->phwi_ctrlr; 1340 phwi_ctrlr = phba->phwi_ctrlr;
1338 1341
@@ -1345,11 +1348,14 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1345 pwrb_handle = pwrb_context->pwrb_handle_basestd[ 1348 pwrb_handle = pwrb_context->pwrb_handle_basestd[
1346 csol_cqe.wrb_index]; 1349 csol_cqe.wrb_index];
1347 1350
1351 spin_lock_bh(&session->back_lock);
1348 task = pwrb_handle->pio_handle; 1352 task = pwrb_handle->pio_handle;
1349 pwrb = pwrb_handle->pwrb; 1353 if (!task) {
1354 spin_unlock_bh(&session->back_lock);
1355 return;
1356 }
1350 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; 1357 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
1351 1358
1352 spin_lock_bh(&session->back_lock);
1353 switch (type) { 1359 switch (type) {
1354 case HWH_TYPE_IO: 1360 case HWH_TYPE_IO:
1355 case HWH_TYPE_IO_RD: 1361 case HWH_TYPE_IO_RD:
@@ -1711,13 +1717,12 @@ beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
1711 struct list_head *hfree_list; 1717 struct list_head *hfree_list;
1712 struct phys_addr *pasync_sge; 1718 struct phys_addr *pasync_sge;
1713 u32 ring_id, doorbell = 0; 1719 u32 ring_id, doorbell = 0;
1714 u16 index, num_entries;
1715 u32 doorbell_offset; 1720 u32 doorbell_offset;
1716 u16 prod = 0, cons; 1721 u16 prod = 0, cons;
1722 u16 index;
1717 1723
1718 phwi_ctrlr = phba->phwi_ctrlr; 1724 phwi_ctrlr = phba->phwi_ctrlr;
1719 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1725 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1720 num_entries = pasync_ctx->num_entries;
1721 if (header) { 1726 if (header) {
1722 cons = pasync_ctx->async_header.free_entries; 1727 cons = pasync_ctx->async_header.free_entries;
1723 hfree_list = &pasync_ctx->async_header.free_list; 1728 hfree_list = &pasync_ctx->async_header.free_list;
@@ -2374,13 +2379,10 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2374static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2379static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2375{ 2380{
2376 uint8_t mem_descr_index, ulp_num; 2381 uint8_t mem_descr_index, ulp_num;
2377 unsigned int num_cq_pages, num_async_pdu_buf_pages; 2382 unsigned int num_async_pdu_buf_pages;
2378 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2383 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2379 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2384 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2380 2385
2381 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2382 sizeof(struct sol_cqe));
2383
2384 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2386 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2385 2387
2386 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2388 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
@@ -2737,7 +2739,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2737 2739
2738 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2740 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2739 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2741 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2740 /* get async_ctx for each ULP */ 2742 /* get async_ctx for each ULP */
2741 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2743 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2742 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2744 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2743 (ulp_num * MEM_DESCR_OFFSET)); 2745 (ulp_num * MEM_DESCR_OFFSET));
@@ -3367,7 +3369,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3367 struct hwi_context_memory *phwi_context, 3369 struct hwi_context_memory *phwi_context,
3368 struct hwi_controller *phwi_ctrlr) 3370 struct hwi_controller *phwi_ctrlr)
3369{ 3371{
3370 unsigned int wrb_mem_index, offset, size, num_wrb_rings; 3372 unsigned int num_wrb_rings;
3371 u64 pa_addr_lo; 3373 u64 pa_addr_lo;
3372 unsigned int idx, num, i, ulp_num; 3374 unsigned int idx, num, i, ulp_num;
3373 struct mem_array *pwrb_arr; 3375 struct mem_array *pwrb_arr;
@@ -3432,10 +3434,6 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3432 } 3434 }
3433 3435
3434 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3436 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3435 wrb_mem_index = 0;
3436 offset = 0;
3437 size = 0;
3438
3439 if (ulp_count > 1) { 3437 if (ulp_count > 1) {
3440 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; 3438 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
3441 3439
@@ -3663,7 +3661,6 @@ static void hwi_cleanup_port(struct beiscsi_hba *phba)
3663 struct be_ctrl_info *ctrl = &phba->ctrl; 3661 struct be_ctrl_info *ctrl = &phba->ctrl;
3664 struct hwi_controller *phwi_ctrlr; 3662 struct hwi_controller *phwi_ctrlr;
3665 struct hwi_context_memory *phwi_context; 3663 struct hwi_context_memory *phwi_context;
3666 struct hd_async_context *pasync_ctx;
3667 int i, eq_for_mcc, ulp_num; 3664 int i, eq_for_mcc, ulp_num;
3668 3665
3669 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3666 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
@@ -3700,8 +3697,6 @@ static void hwi_cleanup_port(struct beiscsi_hba *phba)
3700 q = &phwi_context->be_def_dataq[ulp_num]; 3697 q = &phwi_context->be_def_dataq[ulp_num];
3701 if (q->created) 3698 if (q->created)
3702 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3699 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3703
3704 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
3705 } 3700 }
3706 } 3701 }
3707 3702
@@ -3804,7 +3799,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3804 /** 3799 /**
3805 * Now that the default PDU rings have been created, 3800 * Now that the default PDU rings have been created,
3806 * let EP know about it. 3801 * let EP know about it.
3807 * Call beiscsi_cmd_iscsi_cleanup before posting?
3808 */ 3802 */
3809 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, 3803 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
3810 ulp_num); 3804 ulp_num);
@@ -3850,14 +3844,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3850 phwi_ctrlr->wrb_context[cri].cid] = 3844 phwi_ctrlr->wrb_context[cri].cid] =
3851 async_arr_idx++; 3845 async_arr_idx++;
3852 } 3846 }
3853 /**
3854 * Now that the default PDU rings have been created,
3855 * let EP know about it.
3856 */
3857 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
3858 ulp_num);
3859 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
3860 ulp_num);
3861 } 3847 }
3862 } 3848 }
3863 3849
@@ -3934,31 +3920,6 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
3934 kfree(phba->phwi_ctrlr); 3920 kfree(phba->phwi_ctrlr);
3935} 3921}
3936 3922
3937static int beiscsi_init_controller(struct beiscsi_hba *phba)
3938{
3939 int ret = -ENOMEM;
3940
3941 ret = beiscsi_get_memory(phba);
3942 if (ret < 0) {
3943 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3944 "BM_%d : beiscsi_dev_probe -"
3945 "Failed in beiscsi_alloc_memory\n");
3946 return ret;
3947 }
3948
3949 ret = hwi_init_controller(phba);
3950 if (ret)
3951 goto free_init;
3952 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3953 "BM_%d : Return success from beiscsi_init_controller");
3954
3955 return 0;
3956
3957free_init:
3958 beiscsi_free_mem(phba);
3959 return ret;
3960}
3961
3962static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3923static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3963{ 3924{
3964 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3925 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
@@ -4089,9 +4050,10 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
4089 } 4050 }
4090 4051
4091 /* Allocate memory for CID array */ 4052 /* Allocate memory for CID array */
4092 ptr_cid_info->cid_array = kzalloc(sizeof(void *) * 4053 ptr_cid_info->cid_array =
4093 BEISCSI_GET_CID_COUNT(phba, 4054 kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num),
4094 ulp_num), GFP_KERNEL); 4055 sizeof(*ptr_cid_info->cid_array),
4056 GFP_KERNEL);
4095 if (!ptr_cid_info->cid_array) { 4057 if (!ptr_cid_info->cid_array) {
4096 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4058 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4097 "BM_%d : Failed to allocate memory" 4059 "BM_%d : Failed to allocate memory"
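The kzalloc(n * size) call is replaced with kcalloc(n, size, GFP_KERNEL),
which zeroes the array and returns NULL if the multiplication would overflow,
instead of allocating a short buffer. The equivalent shapes:

	/* before: overflow in the multiplication under-allocates silently */
	arr = kzalloc(nr * sizeof(*arr), GFP_KERNEL);

	/* after: kcalloc() fails cleanly on overflow */
	arr = kcalloc(nr, sizeof(*arr), GFP_KERNEL);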
@@ -4231,33 +4193,30 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
4231{ 4193{
4232 int ret; 4194 int ret;
4233 4195
4234 ret = beiscsi_init_controller(phba); 4196 ret = hwi_init_controller(phba);
4235 if (ret < 0) { 4197 if (ret < 0) {
4236 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4198 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4237 "BM_%d : beiscsi_dev_probe - Failed in" 4199 "BM_%d : init controller failed\n");
4238 "beiscsi_init_controller\n");
4239 return ret; 4200 return ret;
4240 } 4201 }
4241 ret = beiscsi_init_sgl_handle(phba); 4202 ret = beiscsi_init_sgl_handle(phba);
4242 if (ret < 0) { 4203 if (ret < 0) {
4243 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4204 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4244 "BM_%d : beiscsi_dev_probe - Failed in" 4205 "BM_%d : init sgl handles failed\n");
4245 "beiscsi_init_sgl_handle\n"); 4206 goto cleanup_port;
4246 goto do_cleanup_ctrlr;
4247 } 4207 }
4248 4208
4249 ret = hba_setup_cid_tbls(phba); 4209 ret = hba_setup_cid_tbls(phba);
4250 if (ret < 0) { 4210 if (ret < 0) {
4251 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4211 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4252 "BM_%d : Failed in hba_setup_cid_tbls\n"); 4212 "BM_%d : setup CID table failed\n");
4253 kfree(phba->io_sgl_hndl_base); 4213 kfree(phba->io_sgl_hndl_base);
4254 kfree(phba->eh_sgl_hndl_base); 4214 kfree(phba->eh_sgl_hndl_base);
4255 goto do_cleanup_ctrlr; 4215 goto cleanup_port;
4256 } 4216 }
4257
4258 return ret; 4217 return ret;
4259 4218
4260do_cleanup_ctrlr: 4219cleanup_port:
4261 hwi_cleanup_port(phba); 4220 hwi_cleanup_port(phba);
4262 return ret; 4221 return ret;
4263} 4222}
@@ -5417,10 +5376,10 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
5417 5376
5418 phba->shost->max_id = phba->params.cxns_per_ctrl; 5377 phba->shost->max_id = phba->params.cxns_per_ctrl;
5419 phba->shost->can_queue = phba->params.ios_per_ctrl; 5378 phba->shost->can_queue = phba->params.ios_per_ctrl;
5420 ret = hwi_init_controller(phba); 5379 ret = beiscsi_init_port(phba);
5421 if (ret) { 5380 if (ret < 0) {
5422 __beiscsi_log(phba, KERN_ERR, 5381 __beiscsi_log(phba, KERN_ERR,
5423 "BM_%d : init controller failed %d\n", ret); 5382 "BM_%d : init port failed\n");
5424 goto disable_msix; 5383 goto disable_msix;
5425 } 5384 }
5426 5385
@@ -5526,6 +5485,7 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
5526 cancel_work_sync(&pbe_eq->mcc_work); 5485 cancel_work_sync(&pbe_eq->mcc_work);
5527 } 5486 }
5528 hwi_cleanup_port(phba); 5487 hwi_cleanup_port(phba);
5488 beiscsi_cleanup_port(phba);
5529} 5489}
5530 5490
5531static void beiscsi_sess_work(struct work_struct *work) 5491static void beiscsi_sess_work(struct work_struct *work)
@@ -5638,11 +5598,12 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
5638static int beiscsi_dev_probe(struct pci_dev *pcidev, 5598static int beiscsi_dev_probe(struct pci_dev *pcidev,
5639 const struct pci_device_id *id) 5599 const struct pci_device_id *id)
5640{ 5600{
5641 struct beiscsi_hba *phba = NULL;
5642 struct hwi_controller *phwi_ctrlr;
5643 struct hwi_context_memory *phwi_context; 5601 struct hwi_context_memory *phwi_context;
5602 struct hwi_controller *phwi_ctrlr;
5603 struct beiscsi_hba *phba = NULL;
5644 struct be_eq_obj *pbe_eq; 5604 struct be_eq_obj *pbe_eq;
5645 unsigned int s_handle; 5605 unsigned int s_handle;
5606 char wq_name[20];
5646 int ret, i; 5607 int ret, i;
5647 5608
5648 ret = beiscsi_enable_pci(pcidev); 5609 ret = beiscsi_enable_pci(pcidev);
@@ -5680,6 +5641,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5680 case OC_DEVICE_ID2: 5641 case OC_DEVICE_ID2:
5681 phba->generation = BE_GEN2; 5642 phba->generation = BE_GEN2;
5682 phba->iotask_fn = beiscsi_iotask; 5643 phba->iotask_fn = beiscsi_iotask;
5644 dev_warn(&pcidev->dev,
5645 "Obsolete/Unsupported BE2 Adapter Family\n");
5683 break; 5646 break;
5684 case BE_DEVICE_ID2: 5647 case BE_DEVICE_ID2:
5685 case OC_DEVICE_ID3: 5648 case OC_DEVICE_ID3:
@@ -5735,11 +5698,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5735 5698
5736 phba->shost->max_id = phba->params.cxns_per_ctrl; 5699 phba->shost->max_id = phba->params.cxns_per_ctrl;
5737 phba->shost->can_queue = phba->params.ios_per_ctrl; 5700 phba->shost->can_queue = phba->params.ios_per_ctrl;
5701 ret = beiscsi_get_memory(phba);
5702 if (ret < 0) {
5703 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5704 "BM_%d : alloc host mem failed\n");
5705 goto free_port;
5706 }
5707
5738 ret = beiscsi_init_port(phba); 5708 ret = beiscsi_init_port(phba);
5739 if (ret < 0) { 5709 if (ret < 0) {
5740 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5710 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5741 "BM_%d : beiscsi_dev_probe-" 5711 "BM_%d : init port failed\n");
5742 "Failed in beiscsi_init_port\n"); 5712 beiscsi_free_mem(phba);
5743 goto free_port; 5713 goto free_port;
5744 } 5714 }
5745 5715
@@ -5754,9 +5724,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5754 5724
5755 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 5725 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
5756 5726
5757 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq", 5727 snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
5758 phba->shost->host_no); 5728 phba->shost->host_no);
5759 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name); 5729 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
5760 if (!phba->wq) { 5730 if (!phba->wq) {
5761 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5731 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5762 "BM_%d : beiscsi_dev_probe-" 5732 "BM_%d : beiscsi_dev_probe-"
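Since alloc_workqueue() already takes a printf-style format string, the
intermediate snprintf() into a stack buffer could be dropped entirely (an
alternative, not what this patch does):

	phba->wq = alloc_workqueue("beiscsi_%02x_wq", WQ_MEM_RECLAIM, 1,
				   phba->shost->host_no);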
@@ -5881,7 +5851,6 @@ static void beiscsi_remove(struct pci_dev *pcidev)
5881 5851
5882 /* free all resources */ 5852 /* free all resources */
5883 destroy_workqueue(phba->wq); 5853 destroy_workqueue(phba->wq);
5884 beiscsi_cleanup_port(phba);
5885 beiscsi_free_mem(phba); 5854 beiscsi_free_mem(phba);
5886 5855
5887 /* ctrl uninit */ 5856 /* ctrl uninit */
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 6376657e45f7..218857926566 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
36#include <scsi/scsi_transport_iscsi.h> 36#include <scsi/scsi_transport_iscsi.h>
37 37
38#define DRV_NAME "be2iscsi" 38#define DRV_NAME "be2iscsi"
39#define BUILD_STR "11.2.0.0" 39#define BUILD_STR "11.2.1.0"
40#define BE_NAME "Emulex OneConnect" \ 40#define BE_NAME "Emulex OneConnect" \
41 "Open-iSCSI Driver version" BUILD_STR 41 "Open-iSCSI Driver version" BUILD_STR
42#define DRV_DESC BE_NAME " " "Driver" 42#define DRV_DESC BE_NAME " " "Driver"
@@ -57,7 +57,6 @@
57 57
58#define BE2_IO_DEPTH 1024 58#define BE2_IO_DEPTH 1024
59#define BE2_MAX_SESSIONS 256 59#define BE2_MAX_SESSIONS 256
60#define BE2_CMDS_PER_CXN 128
61#define BE2_TMFS 16 60#define BE2_TMFS 16
62#define BE2_NOPOUT_REQ 16 61#define BE2_NOPOUT_REQ 16
63#define BE2_SGE 32 62#define BE2_SGE 32
@@ -72,8 +71,13 @@
72 71
73#define BEISCSI_SGLIST_ELEMENTS 30 72#define BEISCSI_SGLIST_ELEMENTS 30
74 73
75#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ 74/**
 76#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */ 75 * BE_INVLDT_CMD_TBL_SZ is 128, the total number of commands that can
 76 * be invalidated at a time; consider this before changing the value of
77 * BEISCSI_CMD_PER_LUN.
78 */
79#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
80#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */
77#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ 81#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
78 82
79#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ 83#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
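The comment above ties BEISCSI_CMD_PER_LUN to BE_INVLDT_CMD_TBL_SZ by
convention only. If enforcement were desired, a compile-time assertion placed
in any function built into the driver would do it (a suggestion, not part of
this patch):

	/* fails the build if the two constants ever drift apart */
	BUILD_BUG_ON(BEISCSI_CMD_PER_LUN > BE_INVLDT_CMD_TBL_SZ);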
@@ -239,19 +243,7 @@ struct hba_parameters {
239 unsigned int num_cq_entries; 243 unsigned int num_cq_entries;
240 unsigned int num_eq_entries; 244 unsigned int num_eq_entries;
241 unsigned int wrbs_per_cxn; 245 unsigned int wrbs_per_cxn;
242 unsigned int crashmode;
243 unsigned int hba_num;
244
245 unsigned int mgmt_ws_sz;
246 unsigned int hwi_ws_sz; 246 unsigned int hwi_ws_sz;
247
248 unsigned int eto;
249 unsigned int ldto;
250
251 unsigned int dbg_flags;
252 unsigned int num_cxn;
253
254 unsigned int eq_timer;
255 /** 247 /**
256 * These are calculated from other params. They're here 248 * These are calculated from other params. They're here
257 * for debug purposes 249 * for debug purposes
@@ -272,11 +264,6 @@ struct hba_parameters {
272 unsigned int num_sge; 264 unsigned int num_sge;
273}; 265};
274 266
275struct invalidate_command_table {
276 unsigned short icd;
277 unsigned short cid;
278} __packed;
279
280#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \ 267#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
281 (phwi_ctrlr->wrb_context[cri].ulp_num) 268 (phwi_ctrlr->wrb_context[cri].ulp_num)
282struct hwi_wrb_context { 269struct hwi_wrb_context {
@@ -334,7 +321,6 @@ struct beiscsi_hba {
334 struct be_bus_address pci_pa; /* CSR */ 321 struct be_bus_address pci_pa; /* CSR */
335 /* PCI representation of our HBA */ 322 /* PCI representation of our HBA */
336 struct pci_dev *pcidev; 323 struct pci_dev *pcidev;
337 unsigned short asic_revision;
338 unsigned int num_cpus; 324 unsigned int num_cpus;
339 unsigned int nxt_cqid; 325 unsigned int nxt_cqid;
340 struct msix_entry msix_entries[MAX_CPUS]; 326 struct msix_entry msix_entries[MAX_CPUS];
@@ -355,9 +341,9 @@ struct beiscsi_hba {
355 spinlock_t io_sgl_lock; 341 spinlock_t io_sgl_lock;
356 spinlock_t mgmt_sgl_lock; 342 spinlock_t mgmt_sgl_lock;
357 spinlock_t async_pdu_lock; 343 spinlock_t async_pdu_lock;
358 unsigned int age;
359 struct list_head hba_queue; 344 struct list_head hba_queue;
360#define BE_MAX_SESSION 2048 345#define BE_MAX_SESSION 2048
346#define BE_INVALID_CID 0xffff
361#define BE_SET_CID_TO_CRI(cri_index, cid) \ 347#define BE_SET_CID_TO_CRI(cri_index, cid) \
362 (phba->cid_to_cri_map[cid] = cri_index) 348 (phba->cid_to_cri_map[cid] = cri_index)
363#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid]) 349#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
@@ -425,12 +411,10 @@ struct beiscsi_hba {
425 u8 port_name; 411 u8 port_name;
426 u8 port_speed; 412 u8 port_speed;
427 char fw_ver_str[BEISCSI_VER_STRLEN]; 413 char fw_ver_str[BEISCSI_VER_STRLEN];
428 char wq_name[20];
 429 struct workqueue_struct *wq; /* The actual work queue */ 414 struct workqueue_struct *wq; /* The actual work queue */
430 struct be_ctrl_info ctrl; 415 struct be_ctrl_info ctrl;
431 unsigned int generation; 416 unsigned int generation;
432 unsigned int interface_handle; 417 unsigned int interface_handle;
433 struct invalidate_command_table inv_tbl[128];
434 418
435 struct be_aic_obj aic_obj[MAX_CPUS]; 419 struct be_aic_obj aic_obj[MAX_CPUS];
436 unsigned int attr_log_enable; 420 unsigned int attr_log_enable;
@@ -525,10 +509,6 @@ struct beiscsi_io_task {
525 struct scsi_cmnd *scsi_cmnd; 509 struct scsi_cmnd *scsi_cmnd;
526 int num_sg; 510 int num_sg;
527 struct hwi_wrb_context *pwrb_context; 511 struct hwi_wrb_context *pwrb_context;
528 unsigned int cmd_sn;
529 unsigned int flags;
530 unsigned short cid;
531 unsigned short header_len;
532 itt_t libiscsi_itt; 512 itt_t libiscsi_itt;
533 struct be_cmd_bhs *cmd_bhs; 513 struct be_cmd_bhs *cmd_bhs;
534 struct be_bus_address bhs_pa; 514 struct be_bus_address bhs_pa;
@@ -842,7 +822,7 @@ struct amap_iscsi_wrb_v2 {
842 u8 diff_enbl; /* DWORD 11 */ 822 u8 diff_enbl; /* DWORD 11 */
843 u8 u_run; /* DWORD 11 */ 823 u8 u_run; /* DWORD 11 */
844 u8 o_run; /* DWORD 11 */ 824 u8 o_run; /* DWORD 11 */
845 u8 invalid; /* DWORD 11 */ 825 u8 invld; /* DWORD 11 */
846 u8 dsp; /* DWORD 11 */ 826 u8 dsp; /* DWORD 11 */
847 u8 dmsg; /* DWORD 11 */ 827 u8 dmsg; /* DWORD 11 */
848 u8 rsvd4; /* DWORD 11 */ 828 u8 rsvd4; /* DWORD 11 */
@@ -1042,10 +1022,8 @@ struct hwi_controller {
1042 struct list_head io_sgl_list; 1022 struct list_head io_sgl_list;
1043 struct list_head eh_sgl_list; 1023 struct list_head eh_sgl_list;
1044 struct sgl_handle *psgl_handle_base; 1024 struct sgl_handle *psgl_handle_base;
1045 unsigned int wrb_mem_index;
1046 1025
1047 struct hwi_wrb_context *wrb_context; 1026 struct hwi_wrb_context *wrb_context;
1048 struct mcc_wrb *pmcc_wrb_base;
1049 struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT]; 1027 struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
1050 struct be_ring default_pdu_data[BEISCSI_ULP_COUNT]; 1028 struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
1051 struct hwi_context_memory *phwi_ctxt; 1029 struct hwi_context_memory *phwi_ctxt;
@@ -1062,9 +1040,7 @@ enum hwh_type_enum {
1062}; 1040};
1063 1041
1064struct wrb_handle { 1042struct wrb_handle {
1065 enum hwh_type_enum type;
1066 unsigned short wrb_index; 1043 unsigned short wrb_index;
1067
1068 struct iscsi_task *pio_handle; 1044 struct iscsi_task *pio_handle;
1069 struct iscsi_wrb *pwrb; 1045 struct iscsi_wrb *pwrb;
1070}; 1046};
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index ac05317bba7f..2f6d5c2ac329 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -66,7 +66,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
66 struct bsg_job *job, 66 struct bsg_job *job,
67 struct be_dma_mem *nonemb_cmd) 67 struct be_dma_mem *nonemb_cmd)
68{ 68{
69 struct be_cmd_resp_hdr *resp;
70 struct be_mcc_wrb *wrb; 69 struct be_mcc_wrb *wrb;
71 struct be_sge *mcc_sge; 70 struct be_sge *mcc_sge;
72 unsigned int tag = 0; 71 unsigned int tag = 0;
@@ -76,7 +75,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
76 75
77 nonemb_cmd->size = job->request_payload.payload_len; 76 nonemb_cmd->size = job->request_payload.payload_len;
78 memset(nonemb_cmd->va, 0, nonemb_cmd->size); 77 memset(nonemb_cmd->va, 0, nonemb_cmd->size);
79 resp = nonemb_cmd->va;
80 region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; 78 region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
81 sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; 79 sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
82 sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; 80 sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
@@ -128,50 +126,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
128 return tag; 126 return tag;
129} 127}
130 128
131unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
132 struct invalidate_command_table *inv_tbl,
133 unsigned int num_invalidate, unsigned int cid,
134 struct be_dma_mem *nonemb_cmd)
135
136{
137 struct be_ctrl_info *ctrl = &phba->ctrl;
138 struct be_mcc_wrb *wrb;
139 struct be_sge *sge;
140 struct invalidate_commands_params_in *req;
141 unsigned int i, tag;
142
143 mutex_lock(&ctrl->mbox_lock);
144 wrb = alloc_mcc_wrb(phba, &tag);
145 if (!wrb) {
146 mutex_unlock(&ctrl->mbox_lock);
147 return 0;
148 }
149
150 req = nonemb_cmd->va;
151 memset(req, 0, sizeof(*req));
152 sge = nonembedded_sgl(wrb);
153
154 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
155 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
156 OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
157 sizeof(*req));
158 req->ref_handle = 0;
159 req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
160 for (i = 0; i < num_invalidate; i++) {
161 req->table[i].icd = inv_tbl->icd;
162 req->table[i].cid = inv_tbl->cid;
163 req->icd_count++;
164 inv_tbl++;
165 }
166 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
167 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
168 sge->len = cpu_to_le32(nonemb_cmd->size);
169
170 be_mcc_notify(phba, tag);
171 mutex_unlock(&ctrl->mbox_lock);
172 return tag;
173}
174
175unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba, 129unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
176 struct beiscsi_endpoint *beiscsi_ep, 130 struct beiscsi_endpoint *beiscsi_ep,
177 unsigned short cid, 131 unsigned short cid,
@@ -1066,7 +1020,6 @@ unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba)
1066unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba) 1020unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
1067{ 1021{
1068 struct be_ctrl_info *ctrl = &phba->ctrl; 1022 struct be_ctrl_info *ctrl = &phba->ctrl;
1069 struct be_cmd_get_session_resp *resp;
1070 struct be_cmd_get_session_req *req; 1023 struct be_cmd_get_session_req *req;
1071 struct be_dma_mem *nonemb_cmd; 1024 struct be_dma_mem *nonemb_cmd;
1072 struct be_mcc_wrb *wrb; 1025 struct be_mcc_wrb *wrb;
@@ -1081,7 +1034,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
1081 } 1034 }
1082 1035
1083 nonemb_cmd = &phba->boot_struct.nonemb_cmd; 1036 nonemb_cmd = &phba->boot_struct.nonemb_cmd;
1084 nonemb_cmd->size = sizeof(*resp); 1037 nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp);
1085 nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev, 1038 nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
1086 nonemb_cmd->size, 1039 nonemb_cmd->size,
1087 &nonemb_cmd->dma); 1040 &nonemb_cmd->dma);
@@ -1096,7 +1049,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
1096 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); 1049 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
1097 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, 1050 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
1098 OPCODE_ISCSI_INI_SESSION_GET_A_SESSION, 1051 OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
1099 sizeof(*resp)); 1052 sizeof(struct be_cmd_get_session_resp));
1100 req->session_handle = phba->boot_struct.s_handle; 1053 req->session_handle = phba->boot_struct.s_handle;
1101 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); 1054 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1102 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); 1055 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
@@ -1309,7 +1262,8 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
1309 case BE_DEVICE_ID1: 1262 case BE_DEVICE_ID1:
1310 case OC_DEVICE_ID1: 1263 case OC_DEVICE_ID1:
1311 case OC_DEVICE_ID2: 1264 case OC_DEVICE_ID2:
1312 return snprintf(buf, PAGE_SIZE, "BE2 Adapter Family\n"); 1265 return snprintf(buf, PAGE_SIZE,
1266 "Obsolete/Unsupported BE2 Adapter Family\n");
1313 break; 1267 break;
1314 case BE_DEVICE_ID2: 1268 case BE_DEVICE_ID2:
1315 case OC_DEVICE_ID3: 1269 case OC_DEVICE_ID3:
@@ -1341,7 +1295,7 @@ beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr,
1341 struct Scsi_Host *shost = class_to_shost(dev); 1295 struct Scsi_Host *shost = class_to_shost(dev);
1342 struct beiscsi_hba *phba = iscsi_host_priv(shost); 1296 struct beiscsi_hba *phba = iscsi_host_priv(shost);
1343 1297
1344 return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n", 1298 return snprintf(buf, PAGE_SIZE, "Port Identifier : %u\n",
1345 phba->fw_config.phys_port); 1299 phba->fw_config.phys_port);
1346} 1300}
1347 1301
@@ -1494,3 +1448,64 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
1494 (params->dw[offsetof(struct amap_beiscsi_offload_params, 1448 (params->dw[offsetof(struct amap_beiscsi_offload_params,
1495 exp_statsn) / 32] + 1)); 1449 exp_statsn) / 32] + 1));
1496} 1450}
1451
1452int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
1453 struct invldt_cmd_tbl *inv_tbl,
1454 unsigned int nents)
1455{
1456 struct be_ctrl_info *ctrl = &phba->ctrl;
1457 struct invldt_cmds_params_in *req;
1458 struct be_dma_mem nonemb_cmd;
1459 struct be_mcc_wrb *wrb;
1460 unsigned int i, tag;
1461 struct be_sge *sge;
1462 int rc;
1463
1464 if (!nents || nents > BE_INVLDT_CMD_TBL_SZ)
1465 return -EINVAL;
1466
1467 nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
1468 nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
1469 nonemb_cmd.size,
1470 &nonemb_cmd.dma);
1471 if (!nonemb_cmd.va) {
1472 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
1473 "BM_%d : invldt_cmds_params alloc failed\n");
1474 return -ENOMEM;
1475 }
1476
1477 mutex_lock(&ctrl->mbox_lock);
1478 wrb = alloc_mcc_wrb(phba, &tag);
1479 if (!wrb) {
1480 mutex_unlock(&ctrl->mbox_lock);
1481 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
1482 nonemb_cmd.va, nonemb_cmd.dma);
1483 return -ENOMEM;
1484 }
1485
1486 req = nonemb_cmd.va;
1487 be_wrb_hdr_prepare(wrb, nonemb_cmd.size, false, 1);
1488 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1489 OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
1490 sizeof(*req));
1491 req->ref_handle = 0;
1492 req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
1493 for (i = 0; i < nents; i++) {
1494 req->table[i].icd = inv_tbl[i].icd;
1495 req->table[i].cid = inv_tbl[i].cid;
1496 req->icd_count++;
1497 }
1498 sge = nonembedded_sgl(wrb);
1499 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
1500 sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd.dma));
1501 sge->len = cpu_to_le32(nonemb_cmd.size);
1502
1503 be_mcc_notify(phba, tag);
1504 mutex_unlock(&ctrl->mbox_lock);
1505
1506 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
1507 if (rc != -EBUSY)
1508 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
1509 nonemb_cmd.va, nonemb_cmd.dma);
1510 return rc;
1511}
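One subtlety in the new helper: when beiscsi_mccq_compl_wait() returns -EBUSY
(completion timeout), the DMA buffer is deliberately not freed here,
presumably left for the MCC completion path to release once the firmware is
done writing into it. From the caller's side the helper is fully synchronous:

	struct invldt_cmd_tbl inv = {
		.cid = beiscsi_conn->beiscsi_conn_cid,
		.icd = io_task->psgl_handle->sgl_index,
	};

	if (beiscsi_mgmt_invalidate_icds(phba, &inv, 1))
		return FAILED;		/* fall back to EH escalation */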
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index b897cfd57c72..308f1472f98a 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -36,66 +36,6 @@
 #define PCICFG_UE_STATUS_MASK_LOW	0xA8
 #define PCICFG_UE_STATUS_MASK_HI	0xAC
 
-/**
- * Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field
- */
-struct amap_mcc_sge {
-	u8 pa_lo[32];		/* dword 0 */
-	u8 pa_hi[32];		/* dword 1 */
-	u8 length[32];		/* DWORD 2 */
-} __packed;
-
-/**
- * Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field
- */
-struct amap_mcc_wrb_payload {
-	union {
-		struct amap_mcc_sge sgl[19];
-		u8 embedded[59 * 32];	/* DWORDS 57 to 115 */
-	} u;
-} __packed;
-
-/**
- * Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field
- */
-struct amap_mcc_wrb {
-	u8 embedded;		/* DWORD 0 */
-	u8 rsvd0[2];		/* DWORD 0 */
-	u8 sge_count[5];	/* DWORD 0 */
-	u8 rsvd1[16];		/* DWORD 0 */
-	u8 special[8];		/* DWORD 0 */
-	u8 payload_length[32];
-	u8 tag[64];		/* DWORD 2 */
-	u8 rsvd2[32];		/* DWORD 4 */
-	struct amap_mcc_wrb_payload payload;
-};
-
-struct mcc_sge {
-	u32 pa_lo;		/* dword 0 */
-	u32 pa_hi;		/* dword 1 */
-	u32 length;		/* DWORD 2 */
-} __packed;
-
-struct mcc_wrb_payload {
-	union {
-		struct mcc_sge sgl[19];
-		u32 embedded[59];	/* DWORDS 57 to 115 */
-	} u;
-} __packed;
-
-#define MCC_WRB_EMBEDDED_MASK 0x00000001
-
-struct mcc_wrb {
-	u32 dw[0];		/* DWORD 0 */
-	u32 payload_length;
-	u32 tag[2];		/* DWORD 2 */
-	u32 rsvd2[1];		/* DWORD 4 */
-	struct mcc_wrb_payload payload;
-};
-
 int mgmt_open_connection(struct beiscsi_hba *phba,
 			 struct sockaddr *dst_addr,
 			 struct beiscsi_endpoint *beiscsi_ep,
@@ -104,10 +44,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
 				    unsigned short cid,
 				    unsigned int upload_flag);
-unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
-				  struct invalidate_command_table *inv_tbl,
-				  unsigned int num_invalidate, unsigned int cid,
-				  struct be_dma_mem *nonemb_cmd);
 unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
 					 struct beiscsi_hba *phba,
 					 struct bsg_job *job,
@@ -134,24 +70,31 @@ union iscsi_invalidate_connection_params {
 	struct iscsi_invalidate_connection_params_out response;
 } __packed;
 
-struct invalidate_commands_params_in {
+#define BE_INVLDT_CMD_TBL_SZ	128
+struct invldt_cmd_tbl {
+	unsigned short icd;
+	unsigned short cid;
+} __packed;
+
+struct invldt_cmds_params_in {
 	struct be_cmd_req_hdr hdr;
 	unsigned int ref_handle;
 	unsigned int icd_count;
-	struct invalidate_command_table table[128];
+	struct invldt_cmd_tbl table[BE_INVLDT_CMD_TBL_SZ];
 	unsigned short cleanup_type;
 	unsigned short unused;
 } __packed;
 
-struct invalidate_commands_params_out {
+struct invldt_cmds_params_out {
+	struct be_cmd_resp_hdr hdr;
 	unsigned int ref_handle;
 	unsigned int icd_count;
-	unsigned int icd_status[128];
+	unsigned int icd_status[BE_INVLDT_CMD_TBL_SZ];
 } __packed;
 
-union invalidate_commands_params {
-	struct invalidate_commands_params_in request;
-	struct invalidate_commands_params_out response;
+union be_invldt_cmds_params {
+	struct invldt_cmds_params_in request;
+	struct invldt_cmds_params_out response;
 } __packed;
 
 struct mgmt_hba_attributes {
@@ -231,16 +174,6 @@ struct be_bsg_vendor_cmd {
 
 #define GET_MGMT_CONTROLLER_WS(phba)	(phba->pmgmt_ws)
 
-/* MGMT CMD flags */
-
-#define MGMT_CMDH_FREE		(1<<0)
-
-/* --- MGMT_ERROR_CODES --- */
-/* Error Codes returned in the status field of the CMD response header */
-#define MGMT_STATUS_SUCCESS 0	/* The CMD completed without errors */
-#define MGMT_STATUS_FAILED 1	/* Error status in the Status field of */
-				/* the CMD_RESPONSE_HEADER  */
-
 #define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\
 	pa->lo = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
 					bus_address.u.a32.address_lo; \
@@ -270,6 +203,9 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
 					unsigned short cid,
 					unsigned short issue_reset,
 					unsigned short savecfg_flag);
+int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
+				 struct invldt_cmd_tbl *inv_tbl,
+				 unsigned int nents);
 
 int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type);
 
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 1e7e139d71ea..4aa61e20e82d 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -28,24 +28,6 @@
 BFA_TRC_FILE(FCS, FCS);
 
 /*
- * FCS sub-modules
- */
-struct bfa_fcs_mod_s {
-	void	(*attach) (struct bfa_fcs_s *fcs);
-	void	(*modinit) (struct bfa_fcs_s *fcs);
-	void	(*modexit) (struct bfa_fcs_s *fcs);
-};
-
-#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
-
-static struct bfa_fcs_mod_s fcs_modules[] = {
-	{ bfa_fcs_port_attach, NULL, NULL },
-	{ bfa_fcs_uf_attach, NULL, NULL },
-	{ bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
-	  bfa_fcs_fabric_modexit },
-};
-
-/*
  * fcs_api BFA FCS API
  */
 
@@ -58,52 +40,19 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
 	complete(&bfad->comp);
 }
 
-
-
 /*
- * fcs_api BFA FCS API
- */
-
-/*
- * fcs attach -- called once to initialize data structures at driver attach time
+ * fcs initialization, called once after bfa initialization is complete
  */
 void
-bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
-	       bfa_boolean_t min_cfg)
+bfa_fcs_init(struct bfa_fcs_s *fcs)
 {
-	int	i;
-	struct bfa_fcs_mod_s *mod;
-
-	fcs->bfa = bfa;
-	fcs->bfad = bfad;
-	fcs->min_cfg = min_cfg;
-	fcs->num_rport_logins = 0;
-
-	bfa->fcs = BFA_TRUE;
-	fcbuild_init();
-
-	for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
-		mod = &fcs_modules[i];
-		if (mod->attach)
-			mod->attach(fcs);
-	}
+	bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
+	bfa_trc(fcs, 0);
 }
 
 /*
- * fcs initialization, called once after bfa initialization is complete
+ * fcs_api BFA FCS API
  */
-void
-bfa_fcs_init(struct bfa_fcs_s *fcs)
-{
-	int	i;
-	struct bfa_fcs_mod_s *mod;
-
-	for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
-		mod = &fcs_modules[i];
-		if (mod->modinit)
-			mod->modinit(fcs);
-	}
-}
 
 /*
  * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
@@ -180,26 +129,14 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
 void
 bfa_fcs_exit(struct bfa_fcs_s *fcs)
 {
-	struct bfa_fcs_mod_s *mod;
-	int	nmods, i;
-
 	bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
-
-	nmods = ARRAY_SIZE(fcs_modules);
-
-	for (i = 0; i < nmods; i++) {
-
-		mod = &fcs_modules[i];
-		if (mod->modexit) {
-			bfa_wc_up(&fcs->wc);
-			mod->modexit(fcs);
-		}
-	}
-
+	bfa_wc_up(&fcs->wc);
+	bfa_trc(fcs, 0);
+	bfa_lps_delete(fcs->fabric.lps);
+	bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_DELETE);
 	bfa_wc_wait(&fcs->wc);
 }
 
-
 /*
  * Fabric module implementation.
  */
@@ -1128,62 +1065,6 @@ bfa_fcs_fabric_stop_comp(void *cbarg)
  */
 
 /*
- * Attach time initialization.
- */
-void
-bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
-{
-	struct bfa_fcs_fabric_s *fabric;
-
-	fabric = &fcs->fabric;
-	memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
-
-	/*
-	 * Initialize base fabric.
-	 */
-	fabric->fcs = fcs;
-	INIT_LIST_HEAD(&fabric->vport_q);
-	INIT_LIST_HEAD(&fabric->vf_q);
-	fabric->lps = bfa_lps_alloc(fcs->bfa);
-	WARN_ON(!fabric->lps);
-
-	/*
-	 * Initialize fabric delete completion handler. Fabric deletion is
-	 * complete when the last vport delete is complete.
-	 */
-	bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
-	bfa_wc_up(&fabric->wc); /* For the base port */
-
-	bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-	bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
-}
-
-void
-bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
-{
-	bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
-	bfa_trc(fcs, 0);
-}
-
-/*
- * Module cleanup
- */
-void
-bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
-{
-	struct bfa_fcs_fabric_s *fabric;
-
-	bfa_trc(fcs, 0);
-
-	/*
-	 * Cleanup base fabric.
-	 */
-	fabric = &fcs->fabric;
-	bfa_lps_delete(fabric->lps);
-	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
-}
-
-/*
  * Fabric module stop -- stop FCS actions
  */
 void
@@ -1633,12 +1514,6 @@ bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
 	}
 }
 
-void
-bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
-{
-	bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
-}
-
 /*
  * BFA FCS UF ( Unsolicited Frames)
  */
@@ -1706,8 +1581,44 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
 	bfa_uf_free(uf);
 }
 
+/*
+ * fcs attach -- called once to initialize data structures at driver attach time
+ */
 void
-bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
+bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
+	       bfa_boolean_t min_cfg)
 {
+	struct bfa_fcs_fabric_s *fabric = &fcs->fabric;
+
+	fcs->bfa = bfa;
+	fcs->bfad = bfad;
+	fcs->min_cfg = min_cfg;
+	fcs->num_rport_logins = 0;
+
+	bfa->fcs = BFA_TRUE;
+	fcbuild_init();
+
+	bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
 	bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
+
+	memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
+
+	/*
+	 * Initialize base fabric.
+	 */
+	fabric->fcs = fcs;
+	INIT_LIST_HEAD(&fabric->vport_q);
+	INIT_LIST_HEAD(&fabric->vf_q);
+	fabric->lps = bfa_lps_alloc(fcs->bfa);
+	WARN_ON(!fabric->lps);
+
+	/*
+	 * Initialize fabric delete completion handler. Fabric deletion is
+	 * complete when the last vport delete is complete.
+	 */
+	bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
+	bfa_wc_up(&fabric->wc); /* For the base port */
+
+	bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
+	bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
 }
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 0f797a55d504..e60f72b766ea 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -808,9 +808,7 @@ void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
 /*
  * fabric protected interface functions
  */
-void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
@@ -827,8 +825,6 @@ void bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
 			wwn_t fabric_name);
 u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
-void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
-void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
 			enum bfa_fcs_fabric_event event);
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 02d806012fa1..7eb0eef18fdd 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -813,6 +813,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
 	.name = BFAD_DRIVER_NAME,
 	.info = bfad_im_info,
 	.queuecommand = bfad_im_queuecommand,
+	.eh_timed_out = fc_eh_timed_out,
 	.eh_abort_handler = bfad_im_abort_handler,
 	.eh_device_reset_handler = bfad_im_reset_lun_handler,
 	.eh_bus_reset_handler = bfad_im_reset_bus_handler,
@@ -835,6 +836,7 @@ struct scsi_host_template bfad_im_vport_template = {
 	.name = BFAD_DRIVER_NAME,
 	.info = bfad_im_info,
 	.queuecommand = bfad_im_queuecommand,
+	.eh_timed_out = fc_eh_timed_out,
 	.eh_abort_handler = bfad_im_abort_handler,
 	.eh_device_reset_handler = bfad_im_reset_lun_handler,
 	.eh_bus_reset_handler = bfad_im_reset_bus_handler,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index c639d5a02656..b1e39f985ec9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2947,6 +2947,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
 	.module			= THIS_MODULE,
 	.name			= "QLogic Offload FCoE Initiator",
 	.queuecommand		= bnx2fc_queuecommand,
+	.eh_timed_out		= fc_eh_timed_out,
 	.eh_abort_handler	= bnx2fc_eh_abort,	  /* abts */
 	.eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
 	.eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 133901fd3e35..f32a66f89d25 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2259,6 +2259,7 @@ static struct scsi_host_template bnx2i_host_template = {
 	.name			= "QLogic Offload iSCSI Initiator",
 	.proc_name		= "bnx2i",
 	.queuecommand		= iscsi_queuecommand,
+	.eh_timed_out		= iscsi_eh_cmd_timed_out,
 	.eh_abort_handler	= iscsi_eh_abort,
 	.eh_device_reset_handler = iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 89a52b941ea8..a1ff75f1384f 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2270,6 +2270,7 @@ struct scsi_host_template csio_fcoe_shost_template = {
 	.name			= CSIO_DRV_DESC,
 	.proc_name		= KBUILD_MODNAME,
 	.queuecommand		= csio_queuecommand,
+	.eh_timed_out		= fc_eh_timed_out,
 	.eh_abort_handler	= csio_eh_abort_handler,
 	.eh_device_reset_handler = csio_eh_lun_reset_handler,
 	.slave_alloc		= csio_slave_alloc,
@@ -2289,6 +2290,7 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
 	.name			= CSIO_DRV_DESC,
 	.proc_name		= KBUILD_MODNAME,
 	.queuecommand		= csio_queuecommand,
+	.eh_timed_out		= fc_eh_timed_out,
 	.eh_abort_handler	= csio_eh_abort_handler,
 	.eh_device_reset_handler = csio_eh_lun_reset_handler,
 	.slave_alloc		= csio_slave_alloc,
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 33e83464e091..1880eb6c68f7 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -90,6 +90,7 @@ static struct scsi_host_template cxgb3i_host_template = {
 	.sg_tablesize	= SG_ALL,
 	.max_sectors	= 0xFFFF,
 	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
+	.eh_timed_out	= iscsi_eh_cmd_timed_out,
 	.eh_abort_handler = iscsi_eh_abort,
 	.eh_device_reset_handler = iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 9a2fdc305cf2..3fb3f5708ff7 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -103,6 +103,7 @@ static struct scsi_host_template cxgb4i_host_template = {
 	.sg_tablesize	= SG_ALL,
 	.max_sectors	= 0xFFFF,
 	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
+	.eh_timed_out	= iscsi_eh_cmd_timed_out,
 	.eh_abort_handler = iscsi_eh_abort,
 	.eh_device_reset_handler = iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 0e9de5d62da2..d11dcc59ff46 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -54,6 +54,9 @@ extern const struct file_operations cxlflash_cxl_fops;
 /* RRQ for master issued cmds */
 #define NUM_RRQ_ENTRY	CXLFLASH_MAX_CMDS
 
+/* SQ for master issued cmds */
+#define NUM_SQ_ENTRY	CXLFLASH_MAX_CMDS
+
 
 static inline void check_sizes(void)
 {
@@ -155,8 +158,8 @@ static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
 
 struct afu {
 	/* Stuff requiring alignment go first. */
-
+	struct sisl_ioarcb sq[NUM_SQ_ENTRY];	/* 16K SQ */
 	u64 rrq_entry[NUM_RRQ_ENTRY];	/* 2K RRQ */
 
 	/* Beware of alignment till here. Preferably introduce new
 	 * fields after this point
@@ -171,9 +174,13 @@ struct afu {
 	struct sisl_host_map __iomem *host_map;	/* MC host map */
 	struct sisl_ctrl_map __iomem *ctrl_map;	/* MC control map */
 
-	struct kref mapcount;
-
 	ctx_hndl_t ctx_hndl;	/* master's context handle */
+
+	atomic_t hsq_credits;
+	spinlock_t hsq_slock;
+	struct sisl_ioarcb *hsq_start;
+	struct sisl_ioarcb *hsq_end;
+	struct sisl_ioarcb *hsq_curr;
 	u64 *hrrq_start;
 	u64 *hrrq_end;
 	u64 *hrrq_curr;
@@ -191,6 +198,23 @@ struct afu {
 
 };
 
+static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode)
+{
+	u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
+
+	return afu_cap & cmd_mode;
+}
+
+static inline bool afu_is_sq_cmd_mode(struct afu *afu)
+{
+	return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
+}
+
+static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
+{
+	return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
+}
+
 static inline u64 lun_to_lunid(u64 lun)
 {
 	__be64 lun_id;
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index 6c318db90c85..0efed177cc8b 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -32,11 +32,13 @@
  */
 static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
 {
+	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+	struct device *dev = &cfg->dev->dev;
 	struct llun_info *lli = NULL;
 
 	lli = kzalloc(sizeof(*lli), GFP_KERNEL);
 	if (unlikely(!lli)) {
-		pr_err("%s: could not allocate lli\n", __func__);
+		dev_err(dev, "%s: could not allocate lli\n", __func__);
 		goto out;
 	}
 
@@ -58,11 +60,13 @@ out:
  */
 static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
 {
+	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+	struct device *dev = &cfg->dev->dev;
 	struct glun_info *gli = NULL;
 
 	gli = kzalloc(sizeof(*gli), GFP_KERNEL);
 	if (unlikely(!gli)) {
-		pr_err("%s: could not allocate gli\n", __func__);
+		dev_err(dev, "%s: could not allocate gli\n", __func__);
 		goto out;
 	}
 
@@ -129,10 +133,10 @@ static struct glun_info *lookup_global(u8 *wwid)
  */
 static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
 {
+	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+	struct device *dev = &cfg->dev->dev;
 	struct llun_info *lli = NULL;
 	struct glun_info *gli = NULL;
-	struct Scsi_Host *shost = sdev->host;
-	struct cxlflash_cfg *cfg = shost_priv(shost);
 
 	if (unlikely(!wwid))
 		goto out;
@@ -165,7 +169,7 @@ static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
 	list_add(&gli->list, &global.gluns);
 
 out:
-	pr_debug("%s: returning %p\n", __func__, lli);
+	dev_dbg(dev, "%s: returning lli=%p, gli=%p\n", __func__, lli, gli);
 	return lli;
 }
 
@@ -225,17 +229,18 @@ void cxlflash_term_global_luns(void)
 int cxlflash_manage_lun(struct scsi_device *sdev,
 			struct dk_cxlflash_manage_lun *manage)
 {
-	int rc = 0;
+	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+	struct device *dev = &cfg->dev->dev;
 	struct llun_info *lli = NULL;
+	int rc = 0;
 	u64 flags = manage->hdr.flags;
 	u32 chan = sdev->channel;
 
 	mutex_lock(&global.mutex);
 	lli = find_and_create_lun(sdev, manage->wwid);
-	pr_debug("%s: ENTER: WWID = %016llX%016llX, flags = %016llX li = %p\n",
+	dev_dbg(dev, "%s: WWID=%016llx%016llx, flags=%016llx lli=%p\n",
 		__func__, get_unaligned_be64(&manage->wwid[0]),
-		get_unaligned_be64(&manage->wwid[8]),
-		manage->hdr.flags, lli);
+		get_unaligned_be64(&manage->wwid[8]), manage->hdr.flags, lli);
 	if (unlikely(!lli)) {
 		rc = -ENOMEM;
 		goto out;
@@ -265,11 +270,11 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
 		}
 	}
 
-	pr_debug("%s: port_sel = %08X chan = %u lun_id = %016llX\n", __func__,
-		lli->port_sel, chan, lli->lun_id[chan]);
+	dev_dbg(dev, "%s: port_sel=%08x chan=%u lun_id=%016llx\n",
+		__func__, lli->port_sel, chan, lli->lun_id[chan]);
 
 out:
 	mutex_unlock(&global.mutex);
-	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
 	return rc;
 }
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b17ebf6d0a7e..7069639e92bc 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -43,6 +43,9 @@ MODULE_LICENSE("GPL");
  */
 static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
 {
+	struct afu *afu = cmd->parent;
+	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
 	struct sisl_ioarcb *ioarcb;
 	struct sisl_ioasa *ioasa;
 	u32 resid;
@@ -56,21 +59,20 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
 	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
 		resid = ioasa->resid;
 		scsi_set_resid(scp, resid);
-		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
+		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
 			__func__, cmd, scp, resid);
 	}
 
 	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
-		pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
+		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p\n",
 			__func__, cmd, scp);
 		scp->result = (DID_ERROR << 16);
 	}
 
-	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
-		"afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
-		__func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
-		ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
-		ioasa->fc_extra);
+	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
+		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
+		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
+		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
 
 	if (ioasa->rc.scsi_rc) {
 		/* We have a SCSI status */
@@ -159,6 +161,7 @@ static void cmd_complete(struct afu_cmd *cmd)
 	ulong lock_flags;
 	struct afu *afu = cmd->parent;
 	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
 	bool cmd_is_tmf;
 
 	if (cmd->scp) {
@@ -170,9 +173,8 @@ static void cmd_complete(struct afu_cmd *cmd)
 
 		cmd_is_tmf = cmd->cmd_tmf;
 
-		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
-				"ioasc=%d\n", __func__, scp, scp->result,
-				cmd->sa.ioasc);
+		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
+				__func__, scp, scp->result, cmd->sa.ioasc);
 
 		scsi_dma_unmap(scp);
 		scp->scsi_done(scp);
@@ -188,10 +190,11 @@ static void cmd_complete(struct afu_cmd *cmd)
 }
 
 /**
- * context_reset_ioarrin() - reset command owner context via IOARRIN register
+ * context_reset() - reset command owner context via specified register
  * @cmd:	AFU command that timed out.
+ * @reset_reg:	MMIO register to perform reset.
  */
-static void context_reset_ioarrin(struct afu_cmd *cmd)
+static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
 {
 	int nretry = 0;
 	u64 rrin = 0x1;
@@ -199,22 +202,44 @@ static void context_reset_ioarrin(struct afu_cmd *cmd)
 	struct cxlflash_cfg *cfg = afu->parent;
 	struct device *dev = &cfg->dev->dev;
 
-	pr_debug("%s: cmd=%p\n", __func__, cmd);
+	dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);
 
-	writeq_be(rrin, &afu->host_map->ioarrin);
+	writeq_be(rrin, reset_reg);
 	do {
-		rrin = readq_be(&afu->host_map->ioarrin);
+		rrin = readq_be(reset_reg);
 		if (rrin != 0x1)
 			break;
 		/* Double delay each time */
 		udelay(1 << nretry);
 	} while (nretry++ < MC_ROOM_RETRY_CNT);
 
-	dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
+	dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
 		__func__, rrin, nretry);
 }
 
 /**
+ * context_reset_ioarrin() - reset command owner context via IOARRIN register
+ * @cmd:	AFU command that timed out.
+ */
+static void context_reset_ioarrin(struct afu_cmd *cmd)
+{
+	struct afu *afu = cmd->parent;
+
+	context_reset(cmd, &afu->host_map->ioarrin);
+}
+
+/**
+ * context_reset_sq() - reset command owner context w/ SQ Context Reset register
+ * @cmd:	AFU command that timed out.
+ */
+static void context_reset_sq(struct afu_cmd *cmd)
+{
+	struct afu *afu = cmd->parent;
+
+	context_reset(cmd, &afu->host_map->sq_ctx_reset);
+}
+
+/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
@@ -251,8 +276,51 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
 	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
 out:
 	spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
-	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
-		cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
+	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
+		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
+	return rc;
+}
+
+/**
+ * send_cmd_sq() - sends an AFU command via SQ ring
+ * @afu:	AFU associated with the host.
+ * @cmd:	AFU command to send.
+ *
+ * Return:
+ *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
+ */
+static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
+{
+	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
+	int rc = 0;
+	int newval;
+	ulong lock_flags;
+
+	newval = atomic_dec_if_positive(&afu->hsq_credits);
+	if (newval <= 0) {
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+
+	cmd->rcb.ioasa = &cmd->sa;
+
+	spin_lock_irqsave(&afu->hsq_slock, lock_flags);
+
+	*afu->hsq_curr = cmd->rcb;
+	if (afu->hsq_curr < afu->hsq_end)
+		afu->hsq_curr++;
+	else
+		afu->hsq_curr = afu->hsq_start;
+	writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail);
+
+	spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
+out:
+	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
+		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
+		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr,
+		readq_be(&afu->host_map->sq_head),
+		readq_be(&afu->host_map->sq_tail));
 	return rc;
 }
 
@@ -266,6 +334,8 @@ out:
  */
 static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
 {
+	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
 	int rc = 0;
 	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
 
@@ -276,10 +346,8 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
 	}
 
 	if (unlikely(cmd->sa.ioasc != 0)) {
-		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
-			"scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
-			cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
-			cmd->sa.rc.fc_rc);
+		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
+			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
 		rc = -1;
 	}
 
@@ -298,8 +366,7 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
 static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 {
 	u32 port_sel = scp->device->channel + 1;
-	struct Scsi_Host *host = scp->device->host;
-	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+	struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
 	struct afu_cmd *cmd = sc_to_afucz(scp);
 	struct device *dev = &cfg->dev->dev;
 	ulong lock_flags;
@@ -344,7 +411,7 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 						to);
 	if (!to) {
 		cfg->tmf_active = false;
-		dev_err(dev, "%s: TMF timed out!\n", __func__);
+		dev_err(dev, "%s: TMF timed out\n", __func__);
 		rc = -1;
 	}
 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
@@ -352,16 +419,6 @@ out:
 	return rc;
 }
 
-static void afu_unmap(struct kref *ref)
-{
-	struct afu *afu = container_of(ref, struct afu, mapcount);
-
-	if (likely(afu->afu_map)) {
-		cxl_psa_unmap((void __iomem *)afu->afu_map);
-		afu->afu_map = NULL;
-	}
-}
-
 /**
  * cxlflash_driver_info() - information handler for this host driver
  * @host:	SCSI host associated with device.
@@ -382,7 +439,7 @@
  */
 static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 {
-	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+	struct cxlflash_cfg *cfg = shost_priv(host);
 	struct afu *afu = cfg->afu;
 	struct device *dev = &cfg->dev->dev;
 	struct afu_cmd *cmd = sc_to_afucz(scp);
@@ -392,10 +449,9 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 	ulong lock_flags;
 	int nseg = 0;
 	int rc = 0;
-	int kref_got = 0;
 
 	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
-			    "cdb=(%08X-%08X-%08X-%08X)\n",
+			    "cdb=(%08x-%08x-%08x-%08x)\n",
 			    __func__, scp, host->host_no, scp->device->channel,
 			    scp->device->id, scp->device->lun,
 			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
@@ -417,11 +473,11 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 
 	switch (cfg->state) {
 	case STATE_RESET:
-		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
+		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	case STATE_FAILTERM:
-		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
+		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
 		scp->result = (DID_NO_CONNECT << 16);
 		scp->scsi_done(scp);
 		rc = 0;
@@ -430,13 +486,10 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 		break;
 	}
 
-	kref_get(&cfg->afu->mapcount);
-	kref_got = 1;
-
 	if (likely(sg)) {
 		nseg = scsi_dma_map(scp);
 		if (unlikely(nseg < 0)) {
-			dev_err(dev, "%s: Fail DMA map!\n", __func__);
+			dev_err(dev, "%s: Fail DMA map\n", __func__);
 			rc = SCSI_MLQUEUE_HOST_BUSY;
 			goto out;
 		}
@@ -463,9 +516,6 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 	if (unlikely(rc))
 		scsi_dma_unmap(scp);
 out:
-	if (kref_got)
-		kref_put(&afu->mapcount, afu_unmap);
-	pr_devel("%s: returning rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -503,13 +553,15 @@ static void free_mem(struct cxlflash_cfg *cfg)
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
- * Waits for any active internal AFU commands to timeout and then unmaps
- * the MMIO space.
+ * Cancels scheduled worker threads, waits for any active internal AFU
+ * commands to timeout and then unmaps the MMIO space.
 */
 static void stop_afu(struct cxlflash_cfg *cfg)
 {
	struct afu *afu = cfg->afu;
 
+	cancel_work_sync(&cfg->work_q);
+
 	if (likely(afu)) {
 		while (atomic_read(&afu->cmds_active))
 			ssleep(1);
@@ -517,7 +569,6 @@ static void stop_afu(struct cxlflash_cfg *cfg)
 			cxl_psa_unmap((void __iomem *)afu->afu_map);
 			afu->afu_map = NULL;
 		}
-		kref_put(&afu->mapcount, afu_unmap);
 	}
 }
 
@@ -585,6 +636,8 @@ static void term_mc(struct cxlflash_cfg *cfg)
 */
 static void term_afu(struct cxlflash_cfg *cfg)
 {
+	struct device *dev = &cfg->dev->dev;
+
 	/*
 	 * Tear down is carefully orchestrated to ensure
 	 * no interrupts can come in when the problem state
@@ -600,7 +653,7 @@ static void term_afu(struct cxlflash_cfg *cfg)
 
 	term_mc(cfg);
 
-	pr_debug("%s: returning\n", __func__);
+	dev_dbg(dev, "%s: returning\n", __func__);
 }
 
 /**
@@ -627,8 +680,7 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
 		return;
 
 	if (!afu || !afu->afu_map) {
-		dev_dbg(dev, "%s: The problem state area is not mapped\n",
-			__func__);
+		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
 		return;
 	}
 
@@ -670,10 +722,11 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
 static void cxlflash_remove(struct pci_dev *pdev)
 {
 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
 	ulong lock_flags;
 
 	if (!pci_is_enabled(pdev)) {
-		pr_debug("%s: Device is disabled\n", __func__);
+		dev_dbg(dev, "%s: Device is disabled\n", __func__);
 		return;
 	}
 
@@ -699,7 +752,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
 		scsi_remove_host(cfg->host);
 		/* fall through */
 	case INIT_STATE_AFU:
-		cancel_work_sync(&cfg->work_q);
 		term_afu(cfg);
 	case INIT_STATE_PCI:
 		pci_disable_device(pdev);
@@ -709,7 +761,7 @@
 		break;
 	}
 
-	pr_debug("%s: returning\n", __func__);
+	dev_dbg(dev, "%s: returning\n", __func__);
 }
 
 /**
@@ -727,7 +779,7 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
 	int rc = 0;
 	struct device *dev = &cfg->dev->dev;
 
-	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
+	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
 	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 					    get_order(sizeof(struct afu)));
 	if (unlikely(!cfg->afu)) {
@@ -751,6 +803,7 @@ out:
 static int init_pci(struct cxlflash_cfg *cfg)
 {
 	struct pci_dev *pdev = cfg->dev;
+	struct device *dev = &cfg->dev->dev;
 	int rc = 0;
 
 	rc = pci_enable_device(pdev);
@@ -761,15 +814,14 @@ static int init_pci(struct cxlflash_cfg *cfg)
 		}
 
 		if (rc) {
-			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
-				__func__);
+			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
 			cxlflash_wait_for_pci_err_recovery(cfg);
 			goto out;
 		}
 	}
 
 out:
-	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -782,19 +834,19 @@ out:
 static int init_scsi(struct cxlflash_cfg *cfg)
 {
 	struct pci_dev *pdev = cfg->dev;
+	struct device *dev = &cfg->dev->dev;
 	int rc = 0;
 
 	rc = scsi_add_host(cfg->host, &pdev->dev);
 	if (rc) {
-		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
-			__func__, rc);
+		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
 		goto out;
 	}
 
 	scsi_scan_host(cfg->host);
 
 out:
-	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
 	return rc;
 }
 
@@ -844,16 +896,12 @@ static void set_port_offline(__be64 __iomem *fc_regs)
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
- *	-EINVAL when @delay_us is less than 1000
 */
-static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
+static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 {
	u64 status;
 
-	if (delay_us < 1000) {
-		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
-		return -EINVAL;
-	}
+	WARN_ON(delay_us < 1000);
 
 	do {
 		msleep(delay_us / 1000);
@@ -877,16 +925,12 @@ static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
- *	-EINVAL when @delay_us is less than 1000
 */
-static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
+static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 {
	u64 status;
 
-	if (delay_us < 1000) {
-		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
-		return -EINVAL;
-	}
+	WARN_ON(delay_us < 1000);
 
 	do {
 		msleep(delay_us / 1000);
@@ -915,11 +959,14 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
 			 u64 wwpn)
 {
+	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
+
 	set_port_offline(fc_regs);
 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
 			       FC_PORT_STATUS_RETRY_CNT)) {
-		pr_debug("%s: wait on port %d to go offline timed out\n",
+		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
 			__func__, port);
 	}
 
 	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
@@ -927,8 +974,8 @@ static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
 	set_port_online(fc_regs);
 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
 			      FC_PORT_STATUS_RETRY_CNT)) {
-		pr_debug("%s: wait on port %d to go online timed out\n",
+		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
 			__func__, port);
 	}
 }
 
@@ -947,6 +994,8 @@ static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
 */
 static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
 {
+	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
	u64 port_sel;
 
 	/* first switch the AFU to the other links, if any */
@@ -958,21 +1007,21 @@ static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
 	set_port_offline(fc_regs);
 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
 			       FC_PORT_STATUS_RETRY_CNT))
-		pr_err("%s: wait on port %d to go offline timed out\n",
+		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
 			__func__, port);
 
 	set_port_online(fc_regs);
 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
 			      FC_PORT_STATUS_RETRY_CNT))
-		pr_err("%s: wait on port %d to go online timed out\n",
+		dev_err(dev, "%s: wait on port %d to go online timed out\n",
 			__func__, port);
 
 	/* switch back to include this port */
 	port_sel |= (1ULL << port);
 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
 
-	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
+	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
 }
 
 /*
@@ -1082,6 +1131,8 @@ static void afu_err_intr_init(struct afu *afu)
 static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
 {
 	struct afu *afu = (struct afu *)data;
+	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
 	u64 reg;
 	u64 reg_unmasked;
 
@@ -1089,18 +1140,17 @@ static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
 	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
 
 	if (reg_unmasked == 0UL) {
-		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
-			__func__, (u64)afu, reg);
+		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
+			__func__, reg);
 		goto cxlflash_sync_err_irq_exit;
 	}
 
-	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
-		__func__, (u64)afu, reg);
+	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
+		__func__, reg);
 
 	writeq_be(reg_unmasked, &afu->host_map->intr_clear);
 
 cxlflash_sync_err_irq_exit:
-	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
 	return IRQ_HANDLED;
 }
 
@@ -1115,6 +1165,8 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
 {
 	struct afu *afu = (struct afu *)data;
 	struct afu_cmd *cmd;
+	struct sisl_ioasa *ioasa;
+	struct sisl_ioarcb *ioarcb;
 	bool toggle = afu->toggle;
 	u64 entry,
 	    *hrrq_start = afu->hrrq_start,
@@ -1128,7 +1180,16 @@
 		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
 			break;
 
-		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
+		entry &= ~SISL_RESP_HANDLE_T_BIT;
+
+		if (afu_is_sq_cmd_mode(afu)) {
+			ioasa = (struct sisl_ioasa *)entry;
+			cmd = container_of(ioasa, struct afu_cmd, sa);
+		} else {
+			ioarcb = (struct sisl_ioarcb *)entry;
+			cmd = container_of(ioarcb, struct afu_cmd, rcb);
+		}
+
 		cmd_complete(cmd);
 
 		/* Advance to next entry or wrap and flip the toggle bit */
@@ -1138,6 +1199,8 @@
 			hrrq_curr = hrrq_start;
 			toggle ^= SISL_RESP_HANDLE_T_BIT;
 		}
+
+		atomic_inc(&afu->hsq_credits);
 	}
 
 	afu->hrrq_curr = hrrq_curr;
@@ -1169,7 +1232,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
 
 	if (reg_unmasked == 0) {
-		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
+		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
 			__func__, reg);
 		goto out;
 	}
@@ -1185,7 +1248,7 @@
 
 		port = info->port;
 
-		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
+		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
 			__func__, port, info->desc,
 		       readq_be(&global->fc_regs[port][FC_STATUS / 8]));
 
@@ -1198,7 +1261,6 @@
 				__func__, port);
 			cfg->lr_state = LINK_RESET_REQUIRED;
 			cfg->lr_port = port;
-			kref_get(&cfg->afu->mapcount);
 			schedule_work(&cfg->work_q);
 		}
 
@@ -1210,7 +1272,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1210 * should be the same and tracing one is sufficient. 1272 * should be the same and tracing one is sufficient.
1211 */ 1273 */
1212 1274
1213 dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n", 1275 dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1214 __func__, port, reg); 1276 __func__, port, reg);
1215 1277
1216 writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]); 1278 writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
@@ -1219,13 +1281,11 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1219 1281
1220 if (info->action & SCAN_HOST) { 1282 if (info->action & SCAN_HOST) {
1221 atomic_inc(&cfg->scan_host_needed); 1283 atomic_inc(&cfg->scan_host_needed);
1222 kref_get(&cfg->afu->mapcount);
1223 schedule_work(&cfg->work_q); 1284 schedule_work(&cfg->work_q);
1224 } 1285 }
1225 } 1286 }
1226 1287
1227out: 1288out:
1228 dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
1229 return IRQ_HANDLED; 1289 return IRQ_HANDLED;
1230} 1290}
1231 1291
@@ -1237,13 +1297,14 @@ out:
1237 */ 1297 */
1238static int start_context(struct cxlflash_cfg *cfg) 1298static int start_context(struct cxlflash_cfg *cfg)
1239{ 1299{
1300 struct device *dev = &cfg->dev->dev;
1240 int rc = 0; 1301 int rc = 0;
1241 1302
1242 rc = cxl_start_context(cfg->mcctx, 1303 rc = cxl_start_context(cfg->mcctx,
1243 cfg->afu->work.work_element_descriptor, 1304 cfg->afu->work.work_element_descriptor,
1244 NULL); 1305 NULL);
1245 1306
1246 pr_debug("%s: returning rc=%d\n", __func__, rc); 1307 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1247 return rc; 1308 return rc;
1248} 1309}
1249 1310
@@ -1256,7 +1317,8 @@ static int start_context(struct cxlflash_cfg *cfg)
1256 */ 1317 */
1257static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) 1318static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1258{ 1319{
1259 struct pci_dev *dev = cfg->dev; 1320 struct device *dev = &cfg->dev->dev;
1321 struct pci_dev *pdev = cfg->dev;
1260 int rc = 0; 1322 int rc = 0;
1261 int ro_start, ro_size, i, j, k; 1323 int ro_start, ro_size, i, j, k;
1262 ssize_t vpd_size; 1324 ssize_t vpd_size;
@@ -1265,10 +1327,10 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1265 char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" }; 1327 char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
1266 1328
1267 /* Get the VPD data from the device */ 1329 /* Get the VPD data from the device */
1268 vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data)); 1330 vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1269 if (unlikely(vpd_size <= 0)) { 1331 if (unlikely(vpd_size <= 0)) {
1270 dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n", 1332 dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1271 __func__, vpd_size); 1333 __func__, vpd_size);
1272 rc = -ENODEV; 1334 rc = -ENODEV;
1273 goto out; 1335 goto out;
1274 } 1336 }
@@ -1277,8 +1339,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1277 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, 1339 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1278 PCI_VPD_LRDT_RO_DATA); 1340 PCI_VPD_LRDT_RO_DATA);
1279 if (unlikely(ro_start < 0)) { 1341 if (unlikely(ro_start < 0)) {
1280 dev_err(&dev->dev, "%s: VPD Read-only data not found\n", 1342 dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1281 __func__);
1282 rc = -ENODEV; 1343 rc = -ENODEV;
1283 goto out; 1344 goto out;
1284 } 1345 }
@@ -1288,8 +1349,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1288 j = ro_size; 1349 j = ro_size;
1289 i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 1350 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1290 if (unlikely((i + j) > vpd_size)) { 1351 if (unlikely((i + j) > vpd_size)) {
1291 pr_debug("%s: Might need to read more VPD (%d > %ld)\n", 1352 dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1292 __func__, (i + j), vpd_size); 1353 __func__, (i + j), vpd_size);
1293 ro_size = vpd_size - i; 1354 ro_size = vpd_size - i;
1294 } 1355 }
1295 1356
@@ -1307,8 +1368,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1307 1368
1308 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); 1369 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1309 if (unlikely(i < 0)) { 1370 if (unlikely(i < 0)) {
1310 dev_err(&dev->dev, "%s: Port %d WWPN not found " 1371 dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
1311 "in VPD\n", __func__, k); 1372 __func__, k);
1312 rc = -ENODEV; 1373 rc = -ENODEV;
1313 goto out; 1374 goto out;
1314 } 1375 }
@@ -1316,9 +1377,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1316 j = pci_vpd_info_field_size(&vpd_data[i]); 1377 j = pci_vpd_info_field_size(&vpd_data[i]);
1317 i += PCI_VPD_INFO_FLD_HDR_SIZE; 1378 i += PCI_VPD_INFO_FLD_HDR_SIZE;
1318 if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) { 1379 if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1319 dev_err(&dev->dev, "%s: Port %d WWPN incomplete or " 1380 dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1320 "VPD corrupt\n", 1381 __func__, k);
1321 __func__, k);
1322 rc = -ENODEV; 1382 rc = -ENODEV;
1323 goto out; 1383 goto out;
1324 } 1384 }
@@ -1326,15 +1386,15 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1326 memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); 1386 memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1327 rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); 1387 rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1328 if (unlikely(rc)) { 1388 if (unlikely(rc)) {
1329 dev_err(&dev->dev, "%s: Fail to convert port %d WWPN " 1389 dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1330 "to integer\n", __func__, k); 1390 __func__, k);
1331 rc = -ENODEV; 1391 rc = -ENODEV;
1332 goto out; 1392 goto out;
1333 } 1393 }
1334 } 1394 }
1335 1395
1336out: 1396out:
1337 pr_debug("%s: returning rc=%d\n", __func__, rc); 1397 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1338 return rc; 1398 return rc;
1339} 1399}
1340 1400
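For reference, a condensed sketch of the VPD walk above, using the stock <linux/pci.h> helpers of this era. WWPN_LEN (16) is assumed from the driver header; note that the driver's kstrtoul(tmp_buf, WWPN_LEN, ...) call works only because the second argument is the numeric base and WWPN_LEN happens to equal 16 (hex). kstrtou64() is used here instead to stay safe on 32-bit builds:

    static int parse_wwpn(const u8 *vpd, unsigned int vpd_size,
                          const char *tag, u64 *wwpn)
    {
            char buf[WWPN_LEN + 1] = { };   /* NUL-terminated copy for parsing */
            int ro, i;

            ro = pci_vpd_find_tag(vpd, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
            if (ro < 0)
                    return -ENODEV;         /* no read-only section */

            i = pci_vpd_find_info_keyword(vpd, ro + PCI_VPD_LRDT_TAG_SIZE,
                                          pci_vpd_lrdt_size(&vpd[ro]), tag);
            if (i < 0)
                    return -ENODEV;         /* keyword ("V5"/"V6") missing */

            memcpy(buf, &vpd[i + PCI_VPD_INFO_FLD_HDR_SIZE], WWPN_LEN);
            return kstrtou64(buf, 16, wwpn);        /* WWPNs are hex strings */
    }
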
@@ -1388,12 +1448,18 @@ static int init_global(struct cxlflash_cfg *cfg)
1388 goto out; 1448 goto out;
1389 } 1449 }
1390 1450
1391 pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]); 1451 dev_dbg(dev, "%s: wwpn0=%016llx wwpn1=%016llx\n",
1452 __func__, wwpn[0], wwpn[1]);
1392 1453
1393 /* Set up RRQ in AFU for master issued cmds */ 1454 /* Set up RRQ and SQ in AFU for master issued cmds */
1394 writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start); 1455 writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
1395 writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end); 1456 writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
1396 1457
1458 if (afu_is_sq_cmd_mode(afu)) {
1459 writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start);
1460 writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end);
1461 }
1462
1397 /* AFU configuration */ 1463 /* AFU configuration */
1398 reg = readq_be(&afu->afu_map->global.regs.afu_config); 1464 reg = readq_be(&afu->afu_map->global.regs.afu_config);
1399 reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; 1465 reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
@@ -1443,7 +1509,6 @@ static int init_global(struct cxlflash_cfg *cfg)
1443 &afu->ctrl_map->ctx_cap); 1509 &afu->ctrl_map->ctx_cap);
1444 /* Initialize heartbeat */ 1510 /* Initialize heartbeat */
1445 afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); 1511 afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1446
1447out: 1512out:
1448 return rc; 1513 return rc;
1449} 1514}
@@ -1455,6 +1520,7 @@ out:
1455static int start_afu(struct cxlflash_cfg *cfg) 1520static int start_afu(struct cxlflash_cfg *cfg)
1456{ 1521{
1457 struct afu *afu = cfg->afu; 1522 struct afu *afu = cfg->afu;
1523 struct device *dev = &cfg->dev->dev;
1458 int rc = 0; 1524 int rc = 0;
1459 1525
1460 init_pcr(cfg); 1526 init_pcr(cfg);
@@ -1468,9 +1534,20 @@ static int start_afu(struct cxlflash_cfg *cfg)
1468 afu->hrrq_curr = afu->hrrq_start; 1534 afu->hrrq_curr = afu->hrrq_start;
1469 afu->toggle = 1; 1535 afu->toggle = 1;
1470 1536
1537 /* Initialize SQ */
1538 if (afu_is_sq_cmd_mode(afu)) {
1539 memset(&afu->sq, 0, sizeof(afu->sq));
1540 afu->hsq_start = &afu->sq[0];
1541 afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1];
1542 afu->hsq_curr = afu->hsq_start;
1543
1544 spin_lock_init(&afu->hsq_slock);
1545 atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1);
1546 }
1547
1471 rc = init_global(cfg); 1548 rc = init_global(cfg);
1472 1549
1473 pr_debug("%s: returning rc=%d\n", __func__, rc); 1550 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1474 return rc; 1551 return rc;
1475} 1552}
1476 1553
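The submit side that consumes this ring, send_cmd_sq(), is not part of this hunk; a sketch of how it plausibly operates on the fields initialized above (hsq_credits starts at NUM_SQ_ENTRY - 1 so a full ring never looks empty):

    static int sq_submit(struct afu *afu, struct sisl_ioarcb *rcb)
    {
            unsigned long lock_flags;

            if (atomic_dec_if_positive(&afu->hsq_credits) < 0)
                    return -EBUSY;                  /* ring full, retry later */

            spin_lock_irqsave(&afu->hsq_slock, lock_flags);
            *afu->hsq_curr = *rcb;                  /* copy IOARCB into the ring */
            if (afu->hsq_curr < afu->hsq_end)
                    afu->hsq_curr++;
            else
                    afu->hsq_curr = afu->hsq_start; /* wrap around */
            writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail); /* doorbell */
            spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
            return 0;
    }
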
@@ -1490,7 +1567,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1490 1567
1491 rc = cxl_allocate_afu_irqs(ctx, 3); 1568 rc = cxl_allocate_afu_irqs(ctx, 3);
1492 if (unlikely(rc)) { 1569 if (unlikely(rc)) {
1493 dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n", 1570 dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1494 __func__, rc); 1571 __func__, rc);
1495 level = UNDO_NOOP; 1572 level = UNDO_NOOP;
1496 goto out; 1573 goto out;
@@ -1499,8 +1576,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1499 rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu, 1576 rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
1500 "SISL_MSI_SYNC_ERROR"); 1577 "SISL_MSI_SYNC_ERROR");
1501 if (unlikely(rc <= 0)) { 1578 if (unlikely(rc <= 0)) {
1502 dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n", 1579 dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1503 __func__);
1504 level = FREE_IRQ; 1580 level = FREE_IRQ;
1505 goto out; 1581 goto out;
1506 } 1582 }
@@ -1508,8 +1584,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1508 rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu, 1584 rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
1509 "SISL_MSI_RRQ_UPDATED"); 1585 "SISL_MSI_RRQ_UPDATED");
1510 if (unlikely(rc <= 0)) { 1586 if (unlikely(rc <= 0)) {
1511 dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n", 1587 dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1512 __func__);
1513 level = UNMAP_ONE; 1588 level = UNMAP_ONE;
1514 goto out; 1589 goto out;
1515 } 1590 }
@@ -1517,8 +1592,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1517 rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu, 1592 rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
1518 "SISL_MSI_ASYNC_ERROR"); 1593 "SISL_MSI_ASYNC_ERROR");
1519 if (unlikely(rc <= 0)) { 1594 if (unlikely(rc <= 0)) {
1520 dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n", 1595 dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1521 __func__);
1522 level = UNMAP_TWO; 1596 level = UNMAP_TWO;
1523 goto out; 1597 goto out;
1524 } 1598 }
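Each failure in init_intr() returns an undo_level telling the caller exactly how far setup got. The matching unwind, term_intr() in this driver, presumably descends the same ladder in reverse; a sketch using the cxl kernel API of this era:

    static void undo_irqs(struct cxl_context *ctx, struct afu *afu,
                          enum undo_level level)
    {
            switch (level) {
            case UNMAP_THREE:
                    cxl_unmap_afu_irq(ctx, 3, afu);
                    /* fall through */
            case UNMAP_TWO:
                    cxl_unmap_afu_irq(ctx, 2, afu);
                    /* fall through */
            case UNMAP_ONE:
                    cxl_unmap_afu_irq(ctx, 1, afu);
                    /* fall through */
            case FREE_IRQ:
                    cxl_free_afu_irqs(ctx);
                    /* fall through */
            default:                        /* UNDO_NOOP: nothing to undo */
                    break;
            }
    }
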
@@ -1552,15 +1626,13 @@ static int init_mc(struct cxlflash_cfg *cfg)
1552 /* During initialization reset the AFU to start from a clean slate */ 1626 /* During initialization reset the AFU to start from a clean slate */
1553 rc = cxl_afu_reset(cfg->mcctx); 1627 rc = cxl_afu_reset(cfg->mcctx);
1554 if (unlikely(rc)) { 1628 if (unlikely(rc)) {
1555 dev_err(dev, "%s: initial AFU reset failed rc=%d\n", 1629 dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc);
1556 __func__, rc);
1557 goto ret; 1630 goto ret;
1558 } 1631 }
1559 1632
1560 level = init_intr(cfg, ctx); 1633 level = init_intr(cfg, ctx);
1561 if (unlikely(level)) { 1634 if (unlikely(level)) {
1562 dev_err(dev, "%s: setting up interrupts failed rc=%d\n", 1635 dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
1563 __func__, rc);
1564 goto out; 1636 goto out;
1565 } 1637 }
1566 1638
@@ -1575,7 +1647,7 @@ static int init_mc(struct cxlflash_cfg *cfg)
1575 goto out; 1647 goto out;
1576 } 1648 }
1577ret: 1649ret:
1578 pr_debug("%s: returning rc=%d\n", __func__, rc); 1650 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1579 return rc; 1651 return rc;
1580out: 1652out:
1581 term_intr(cfg, level); 1653 term_intr(cfg, level);
@@ -1602,7 +1674,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
1602 1674
1603 rc = init_mc(cfg); 1675 rc = init_mc(cfg);
1604 if (rc) { 1676 if (rc) {
1605 dev_err(dev, "%s: call to init_mc failed, rc=%d!\n", 1677 dev_err(dev, "%s: init_mc failed rc=%d\n",
1606 __func__, rc); 1678 __func__, rc);
1607 goto out; 1679 goto out;
1608 } 1680 }
@@ -1610,11 +1682,10 @@ static int init_afu(struct cxlflash_cfg *cfg)
1610 /* Map the entire MMIO space of the AFU */ 1682 /* Map the entire MMIO space of the AFU */
1611 afu->afu_map = cxl_psa_map(cfg->mcctx); 1683 afu->afu_map = cxl_psa_map(cfg->mcctx);
1612 if (!afu->afu_map) { 1684 if (!afu->afu_map) {
1613 dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__); 1685 dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
1614 rc = -ENOMEM; 1686 rc = -ENOMEM;
1615 goto err1; 1687 goto err1;
1616 } 1688 }
1617 kref_init(&afu->mapcount);
1618 1689
1619 /* No byte reverse on reading afu_version or string will be backwards */ 1690 /* No byte reverse on reading afu_version or string will be backwards */
1620 reg = readq(&afu->afu_map->global.regs.afu_version); 1691 reg = readq(&afu->afu_map->global.regs.afu_version);
@@ -1622,24 +1693,28 @@ static int init_afu(struct cxlflash_cfg *cfg)
1622 afu->interface_version = 1693 afu->interface_version =
1623 readq_be(&afu->afu_map->global.regs.interface_version); 1694 readq_be(&afu->afu_map->global.regs.interface_version);
1624 if ((afu->interface_version + 1) == 0) { 1695 if ((afu->interface_version + 1) == 0) {
1625 pr_err("Back level AFU, please upgrade. AFU version %s " 1696 dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
1626 "interface version 0x%llx\n", afu->version, 1697 "interface version %016llx\n", afu->version,
1627 afu->interface_version); 1698 afu->interface_version);
1628 rc = -EINVAL; 1699 rc = -EINVAL;
1629 goto err2; 1700 goto err1;
1630 } 1701 }
1631 1702
1632 afu->send_cmd = send_cmd_ioarrin; 1703 if (afu_is_sq_cmd_mode(afu)) {
1633 afu->context_reset = context_reset_ioarrin; 1704 afu->send_cmd = send_cmd_sq;
1705 afu->context_reset = context_reset_sq;
1706 } else {
1707 afu->send_cmd = send_cmd_ioarrin;
1708 afu->context_reset = context_reset_ioarrin;
1709 }
1634 1710
1635 pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__, 1711 dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
1636 afu->version, afu->interface_version); 1712 afu->version, afu->interface_version);
1637 1713
1638 rc = start_afu(cfg); 1714 rc = start_afu(cfg);
1639 if (rc) { 1715 if (rc) {
1640 dev_err(dev, "%s: call to start_afu failed, rc=%d!\n", 1716 dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
1641 __func__, rc); 1717 goto err1;
1642 goto err2;
1643 } 1718 }
1644 1719
1645 afu_err_intr_init(cfg->afu); 1720 afu_err_intr_init(cfg->afu);
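The send_cmd/context_reset selection is plain function-pointer dispatch keyed off an AFU capability bit, so the I/O paths stay mode-agnostic. A sketch, assuming afu_is_sq_cmd_mode() tests SISL_INTVER_CAP_SQ_CMD_MODE in the interface_version word (masks appear in the sislite.h hunk below); the driver's own helper may decode the capability field differently, but the bit tested is the same:

    static inline bool afu_is_sq_cmd_mode(struct afu *afu)
    {
            return !!(afu->interface_version & SISL_INTVER_CAP_SQ_CMD_MODE);
    }

    /* Call sites then need no mode checks: */
    rc = afu->send_cmd(afu, cmd);   /* send_cmd_sq or send_cmd_ioarrin */
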
@@ -1649,11 +1724,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
1649 /* Restore the LUN mappings */ 1724 /* Restore the LUN mappings */
1650 cxlflash_restore_luntable(cfg); 1725 cxlflash_restore_luntable(cfg);
1651out: 1726out:
1652 pr_debug("%s: returning rc=%d\n", __func__, rc); 1727 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1653 return rc; 1728 return rc;
1654 1729
1655err2:
1656 kref_put(&afu->mapcount, afu_unmap);
1657err1: 1730err1:
1658 term_intr(cfg, UNMAP_THREE); 1731 term_intr(cfg, UNMAP_THREE);
1659 term_mc(cfg); 1732 term_mc(cfg);
@@ -1693,7 +1766,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
1693 static DEFINE_MUTEX(sync_active); 1766 static DEFINE_MUTEX(sync_active);
1694 1767
1695 if (cfg->state != STATE_NORMAL) { 1768 if (cfg->state != STATE_NORMAL) {
1696 pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state); 1769 dev_dbg(dev, "%s: Sync not required state=%u\n",
1770 __func__, cfg->state);
1697 return 0; 1771 return 0;
1698 } 1772 }
1699 1773
@@ -1710,7 +1784,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
1710 init_completion(&cmd->cevent); 1784 init_completion(&cmd->cevent);
1711 cmd->parent = afu; 1785 cmd->parent = afu;
1712 1786
1713 pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); 1787 dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
1714 1788
1715 cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; 1789 cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
1716 cmd->rcb.ctx_id = afu->ctx_hndl; 1790 cmd->rcb.ctx_id = afu->ctx_hndl;
@@ -1735,7 +1809,7 @@ out:
1735 atomic_dec(&afu->cmds_active); 1809 atomic_dec(&afu->cmds_active);
1736 mutex_unlock(&sync_active); 1810 mutex_unlock(&sync_active);
1737 kfree(buf); 1811 kfree(buf);
1738 pr_debug("%s: returning rc=%d\n", __func__, rc); 1812 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1739 return rc; 1813 return rc;
1740} 1814}
1741 1815
@@ -1747,16 +1821,17 @@ out:
1747 */ 1821 */
1748static int afu_reset(struct cxlflash_cfg *cfg) 1822static int afu_reset(struct cxlflash_cfg *cfg)
1749{ 1823{
1824 struct device *dev = &cfg->dev->dev;
1750 int rc = 0; 1825 int rc = 0;
1826
1751 /* Stop the context before the reset. Since the context is 1827 /* Stop the context before the reset. Since the context is
1752 * no longer available restart it after the reset is complete 1828 * no longer available restart it after the reset is complete
1753 */ 1829 */
1754
1755 term_afu(cfg); 1830 term_afu(cfg);
1756 1831
1757 rc = init_afu(cfg); 1832 rc = init_afu(cfg);
1758 1833
1759 pr_debug("%s: returning rc=%d\n", __func__, rc); 1834 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1760 return rc; 1835 return rc;
1761} 1836}
1762 1837
@@ -1785,18 +1860,18 @@ static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
1785{ 1860{
1786 int rc = SUCCESS; 1861 int rc = SUCCESS;
1787 struct Scsi_Host *host = scp->device->host; 1862 struct Scsi_Host *host = scp->device->host;
1788 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; 1863 struct cxlflash_cfg *cfg = shost_priv(host);
1864 struct device *dev = &cfg->dev->dev;
1789 struct afu *afu = cfg->afu; 1865 struct afu *afu = cfg->afu;
1790 int rcr = 0; 1866 int rcr = 0;
1791 1867
1792 pr_debug("%s: (scp=%p) %d/%d/%d/%llu " 1868 dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
1793 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp, 1869 "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
1794 host->host_no, scp->device->channel, 1870 scp->device->channel, scp->device->id, scp->device->lun,
1795 scp->device->id, scp->device->lun, 1871 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1796 get_unaligned_be32(&((u32 *)scp->cmnd)[0]), 1872 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1797 get_unaligned_be32(&((u32 *)scp->cmnd)[1]), 1873 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1798 get_unaligned_be32(&((u32 *)scp->cmnd)[2]), 1874 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1799 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1800 1875
1801retry: 1876retry:
1802 switch (cfg->state) { 1877 switch (cfg->state) {
@@ -1813,7 +1888,7 @@ retry:
1813 break; 1888 break;
1814 } 1889 }
1815 1890
1816 pr_debug("%s: returning rc=%d\n", __func__, rc); 1891 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1817 return rc; 1892 return rc;
1818} 1893}
1819 1894
@@ -1835,16 +1910,16 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
1835 int rc = SUCCESS; 1910 int rc = SUCCESS;
1836 int rcr = 0; 1911 int rcr = 0;
1837 struct Scsi_Host *host = scp->device->host; 1912 struct Scsi_Host *host = scp->device->host;
1838 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; 1913 struct cxlflash_cfg *cfg = shost_priv(host);
1914 struct device *dev = &cfg->dev->dev;
1839 1915
1840 pr_debug("%s: (scp=%p) %d/%d/%d/%llu " 1916 dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
1841 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp, 1917 "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
1842 host->host_no, scp->device->channel, 1918 scp->device->channel, scp->device->id, scp->device->lun,
1843 scp->device->id, scp->device->lun, 1919 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1844 get_unaligned_be32(&((u32 *)scp->cmnd)[0]), 1920 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1845 get_unaligned_be32(&((u32 *)scp->cmnd)[1]), 1921 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1846 get_unaligned_be32(&((u32 *)scp->cmnd)[2]), 1922 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1847 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1848 1923
1849 switch (cfg->state) { 1924 switch (cfg->state) {
1850 case STATE_NORMAL: 1925 case STATE_NORMAL:
@@ -1870,7 +1945,7 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
1870 break; 1945 break;
1871 } 1946 }
1872 1947
1873 pr_debug("%s: returning rc=%d\n", __func__, rc); 1948 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1874 return rc; 1949 return rc;
1875} 1950}
1876 1951
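The (struct cxlflash_cfg *)host->hostdata casts replaced throughout are equivalent to shost_priv(), the SCSI midlayer accessor for the per-host private area that scsi_host_alloc() reserves after struct Scsi_Host:

    struct Scsi_Host *host = scsi_host_alloc(&driver_template,
                                             sizeof(struct cxlflash_cfg));
    struct cxlflash_cfg *cfg = shost_priv(host); /* typed view of host->hostdata */
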
@@ -1936,8 +2011,7 @@ static ssize_t port0_show(struct device *dev,
1936 struct device_attribute *attr, 2011 struct device_attribute *attr,
1937 char *buf) 2012 char *buf)
1938{ 2013{
1939 struct Scsi_Host *shost = class_to_shost(dev); 2014 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
1940 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
1941 struct afu *afu = cfg->afu; 2015 struct afu *afu = cfg->afu;
1942 2016
1943 return cxlflash_show_port_status(0, afu, buf); 2017 return cxlflash_show_port_status(0, afu, buf);
@@ -1955,8 +2029,7 @@ static ssize_t port1_show(struct device *dev,
1955 struct device_attribute *attr, 2029 struct device_attribute *attr,
1956 char *buf) 2030 char *buf)
1957{ 2031{
1958 struct Scsi_Host *shost = class_to_shost(dev); 2032 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
1959 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
1960 struct afu *afu = cfg->afu; 2033 struct afu *afu = cfg->afu;
1961 2034
1962 return cxlflash_show_port_status(1, afu, buf); 2035 return cxlflash_show_port_status(1, afu, buf);
@@ -1973,8 +2046,7 @@ static ssize_t port1_show(struct device *dev,
1973static ssize_t lun_mode_show(struct device *dev, 2046static ssize_t lun_mode_show(struct device *dev,
1974 struct device_attribute *attr, char *buf) 2047 struct device_attribute *attr, char *buf)
1975{ 2048{
1976 struct Scsi_Host *shost = class_to_shost(dev); 2049 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
1977 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
1978 struct afu *afu = cfg->afu; 2050 struct afu *afu = cfg->afu;
1979 2051
1980 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun); 2052 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
@@ -2007,7 +2079,7 @@ static ssize_t lun_mode_store(struct device *dev,
2007 const char *buf, size_t count) 2079 const char *buf, size_t count)
2008{ 2080{
2009 struct Scsi_Host *shost = class_to_shost(dev); 2081 struct Scsi_Host *shost = class_to_shost(dev);
2010 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata; 2082 struct cxlflash_cfg *cfg = shost_priv(shost);
2011 struct afu *afu = cfg->afu; 2083 struct afu *afu = cfg->afu;
2012 int rc; 2084 int rc;
2013 u32 lun_mode; 2085 u32 lun_mode;
@@ -2069,7 +2141,7 @@ static ssize_t cxlflash_show_port_lun_table(u32 port,
2069 2141
2070 for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) 2142 for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2071 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, 2143 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2072 "%03d: %016llX\n", i, readq_be(&fc_port[i])); 2144 "%03d: %016llx\n", i, readq_be(&fc_port[i]));
2073 return bytes; 2145 return bytes;
2074} 2146}
2075 2147
@@ -2085,8 +2157,7 @@ static ssize_t port0_lun_table_show(struct device *dev,
2085 struct device_attribute *attr, 2157 struct device_attribute *attr,
2086 char *buf) 2158 char *buf)
2087{ 2159{
2088 struct Scsi_Host *shost = class_to_shost(dev); 2160 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2089 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2090 struct afu *afu = cfg->afu; 2161 struct afu *afu = cfg->afu;
2091 2162
2092 return cxlflash_show_port_lun_table(0, afu, buf); 2163 return cxlflash_show_port_lun_table(0, afu, buf);
@@ -2104,8 +2175,7 @@ static ssize_t port1_lun_table_show(struct device *dev,
2104 struct device_attribute *attr, 2175 struct device_attribute *attr,
2105 char *buf) 2176 char *buf)
2106{ 2177{
2107 struct Scsi_Host *shost = class_to_shost(dev); 2178 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2108 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2109 struct afu *afu = cfg->afu; 2179 struct afu *afu = cfg->afu;
2110 2180
2111 return cxlflash_show_port_lun_table(1, afu, buf); 2181 return cxlflash_show_port_lun_table(1, afu, buf);
@@ -2250,7 +2320,6 @@ static void cxlflash_worker_thread(struct work_struct *work)
2250 2320
2251 if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) 2321 if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
2252 scsi_scan_host(cfg->host); 2322 scsi_scan_host(cfg->host);
2253 kref_put(&afu->mapcount, afu_unmap);
2254} 2323}
2255 2324
2256/** 2325/**
@@ -2265,6 +2334,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
2265{ 2334{
2266 struct Scsi_Host *host; 2335 struct Scsi_Host *host;
2267 struct cxlflash_cfg *cfg = NULL; 2336 struct cxlflash_cfg *cfg = NULL;
2337 struct device *dev = &pdev->dev;
2268 struct dev_dependent_vals *ddv; 2338 struct dev_dependent_vals *ddv;
2269 int rc = 0; 2339 int rc = 0;
2270 2340
@@ -2276,8 +2346,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
2276 2346
2277 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); 2347 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2278 if (!host) { 2348 if (!host) {
2279 dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n", 2349 dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
2280 __func__);
2281 rc = -ENOMEM; 2350 rc = -ENOMEM;
2282 goto out; 2351 goto out;
2283 } 2352 }
@@ -2288,12 +2357,11 @@ static int cxlflash_probe(struct pci_dev *pdev,
2288 host->unique_id = host->host_no; 2357 host->unique_id = host->host_no;
2289 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; 2358 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2290 2359
2291 cfg = (struct cxlflash_cfg *)host->hostdata; 2360 cfg = shost_priv(host);
2292 cfg->host = host; 2361 cfg->host = host;
2293 rc = alloc_mem(cfg); 2362 rc = alloc_mem(cfg);
2294 if (rc) { 2363 if (rc) {
2295 dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n", 2364 dev_err(dev, "%s: alloc_mem failed\n", __func__);
2296 __func__);
2297 rc = -ENOMEM; 2365 rc = -ENOMEM;
2298 scsi_host_put(cfg->host); 2366 scsi_host_put(cfg->host);
2299 goto out; 2367 goto out;
@@ -2334,30 +2402,27 @@ static int cxlflash_probe(struct pci_dev *pdev,
2334 2402
2335 rc = init_pci(cfg); 2403 rc = init_pci(cfg);
2336 if (rc) { 2404 if (rc) {
2337 dev_err(&pdev->dev, "%s: call to init_pci " 2405 dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
2338 "failed rc=%d!\n", __func__, rc);
2339 goto out_remove; 2406 goto out_remove;
2340 } 2407 }
2341 cfg->init_state = INIT_STATE_PCI; 2408 cfg->init_state = INIT_STATE_PCI;
2342 2409
2343 rc = init_afu(cfg); 2410 rc = init_afu(cfg);
2344 if (rc) { 2411 if (rc) {
2345 dev_err(&pdev->dev, "%s: call to init_afu " 2412 dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
2346 "failed rc=%d!\n", __func__, rc);
2347 goto out_remove; 2413 goto out_remove;
2348 } 2414 }
2349 cfg->init_state = INIT_STATE_AFU; 2415 cfg->init_state = INIT_STATE_AFU;
2350 2416
2351 rc = init_scsi(cfg); 2417 rc = init_scsi(cfg);
2352 if (rc) { 2418 if (rc) {
2353 dev_err(&pdev->dev, "%s: call to init_scsi " 2419 dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
2354 "failed rc=%d!\n", __func__, rc);
2355 goto out_remove; 2420 goto out_remove;
2356 } 2421 }
2357 cfg->init_state = INIT_STATE_SCSI; 2422 cfg->init_state = INIT_STATE_SCSI;
2358 2423
2359out: 2424out:
2360 pr_debug("%s: returning rc=%d\n", __func__, rc); 2425 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2361 return rc; 2426 return rc;
2362 2427
2363out_remove: 2428out_remove:
@@ -2395,7 +2460,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2395 drain_ioctls(cfg); 2460 drain_ioctls(cfg);
2396 rc = cxlflash_mark_contexts_error(cfg); 2461 rc = cxlflash_mark_contexts_error(cfg);
2397 if (unlikely(rc)) 2462 if (unlikely(rc))
2398 dev_err(dev, "%s: Failed to mark user contexts!(%d)\n", 2463 dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
2399 __func__, rc); 2464 __func__, rc);
2400 term_afu(cfg); 2465 term_afu(cfg);
2401 return PCI_ERS_RESULT_NEED_RESET; 2466 return PCI_ERS_RESULT_NEED_RESET;
@@ -2429,7 +2494,7 @@ static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
2429 2494
2430 rc = init_afu(cfg); 2495 rc = init_afu(cfg);
2431 if (unlikely(rc)) { 2496 if (unlikely(rc)) {
2432 dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc); 2497 dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
2433 return PCI_ERS_RESULT_DISCONNECT; 2498 return PCI_ERS_RESULT_DISCONNECT;
2434 } 2499 }
2435 2500
@@ -2477,8 +2542,6 @@ static struct pci_driver cxlflash_driver = {
2477 */ 2542 */
2478static int __init init_cxlflash(void) 2543static int __init init_cxlflash(void)
2479{ 2544{
2480 pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);
2481
2482 cxlflash_list_init(); 2545 cxlflash_list_init();
2483 2546
2484 return pci_register_driver(&cxlflash_driver); 2547 return pci_register_driver(&cxlflash_driver);
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 1a2d09c148b3..a6e48a893fef 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -72,7 +72,10 @@ struct sisl_ioarcb {
72 u16 timeout; /* in units specified by req_flags */ 72 u16 timeout; /* in units specified by req_flags */
73 u32 rsvd1; 73 u32 rsvd1;
74 u8 cdb[16]; /* must be in big endian */ 74 u8 cdb[16]; /* must be in big endian */
75 u64 reserved; /* Reserved area */ 75 union {
76 u64 reserved; /* Reserved for IOARRIN mode */
77 struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */
78 };
76} __packed; 79} __packed;
77 80
78struct sisl_rc { 81struct sisl_rc {
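The union overlays the IOARCB's final 8 bytes: reserved in IOARRIN mode, an IOASA pointer in SQ mode so the AFU knows where to deposit completion status. A sketch of how a command setup path might use it (cmd->sa is assumed here to be the command's struct sisl_ioasa):

    struct sisl_ioarcb *rcb = &cmd->rcb;

    if (afu_is_sq_cmd_mode(afu))
            rcb->ioasa = &cmd->sa;  /* SQ mode: AFU writes status here */
    /* else rcb->reserved stays zero, exactly as before this change */
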
@@ -260,6 +263,11 @@ struct sisl_host_map {
260 __be64 cmd_room; 263 __be64 cmd_room;
261 __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */ 264 __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
262 __be64 mbox_w; /* restricted use */ 265 __be64 mbox_w; /* restricted use */
266 __be64 sq_start; /* Submission Queue (R/W): write sequence and */
267 __be64 sq_end; /* inclusion semantics are the same as RRQ */
268 __be64 sq_head; /* Submission Queue Head (R): for debugging */
269 __be64 sq_tail; /* Submission Queue Tail (R/W): next IOARCB */

270 __be64 sq_ctx_reset; /* Submission Queue Context Reset (R/W) */
263}; 271};
264 272
265/* per context provisioning & control MMIO */ 273/* per context provisioning & control MMIO */
@@ -348,6 +356,15 @@ struct sisl_global_regs {
348 __be64 rsvd[0xf8]; 356 __be64 rsvd[0xf8];
349 __le64 afu_version; 357 __le64 afu_version;
350 __be64 interface_version; 358 __be64 interface_version;
359#define SISL_INTVER_CAP_SHIFT 16
360#define SISL_INTVER_MAJ_SHIFT 8
361#define SISL_INTVER_CAP_MASK 0xFFFFFFFF00000000ULL
362#define SISL_INTVER_MAJ_MASK 0x00000000FFFF0000ULL
363#define SISL_INTVER_MIN_MASK 0x000000000000FFFFULL
364#define SISL_INTVER_CAP_IOARRIN_CMD_MODE 0x800000000000ULL
365#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL
366#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL
367#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
351}; 368};
352 369
353#define CXLFLASH_NUM_FC_PORTS 2 370#define CXLFLASH_NUM_FC_PORTS 2
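The interface_version word now carries a minor version (bits 0-15), major version (bits 16-31), and a capability vector in the upper 32 bits, all in one big-endian register. A decode sketch using the masks above (the driver's own helpers may apply the shifts differently, but the bit positions follow from the mask values):

    u64 intver = readq_be(&afu->afu_map->global.regs.interface_version);
    u16 minor = intver & SISL_INTVER_MIN_MASK;
    u16 major = (intver & SISL_INTVER_MAJ_MASK) >> 16;      /* bits 16..31 */
    bool sq = !!(intver & SISL_INTVER_CAP_SQ_CMD_MODE);     /* SQ capable? */
    bool ioarrin = !!(intver & SISL_INTVER_CAP_IOARRIN_CMD_MODE);
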
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 9636970d9611..90869cee2b20 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -212,7 +212,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
212 } 212 }
213 213
214out: 214out:
215 dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u " 215 dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
216 "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid, 216 "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
217 ctx_ctrl); 217 ctx_ctrl);
218 218
@@ -260,7 +260,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
260 writeq_be(val, &ctrl_map->ctx_cap); 260 writeq_be(val, &ctrl_map->ctx_cap);
261 val = readq_be(&ctrl_map->ctx_cap); 261 val = readq_be(&ctrl_map->ctx_cap);
262 if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) { 262 if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
263 dev_err(dev, "%s: ctx may be closed val=%016llX\n", 263 dev_err(dev, "%s: ctx may be closed val=%016llx\n",
264 __func__, val); 264 __func__, val);
265 rc = -EAGAIN; 265 rc = -EAGAIN;
266 goto out; 266 goto out;
@@ -302,7 +302,7 @@ out:
302 */ 302 */
303static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) 303static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
304{ 304{
305 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 305 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
306 struct device *dev = &cfg->dev->dev; 306 struct device *dev = &cfg->dev->dev;
307 struct glun_info *gli = lli->parent; 307 struct glun_info *gli = lli->parent;
308 u8 *cmd_buf = NULL; 308 u8 *cmd_buf = NULL;
@@ -326,7 +326,7 @@ retry:
326 scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */ 326 scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */
327 put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]); 327 put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
328 328
329 dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__, 329 dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
330 retry_cnt ? "re" : "", scsi_cmd[0]); 330 retry_cnt ? "re" : "", scsi_cmd[0]);
331 331
332 /* Drop the ioctl read semaphore across lengthy call */ 332
@@ -336,7 +336,7 @@ retry:
336 down_read(&cfg->ioctl_rwsem); 336 down_read(&cfg->ioctl_rwsem);
337 rc = check_state(cfg); 337 rc = check_state(cfg);
338 if (rc) { 338 if (rc) {
339 dev_err(dev, "%s: Failed state! result=0x08%X\n", 339 dev_err(dev, "%s: Failed state result=%08x\n",
340 __func__, result); 340 __func__, result);
341 rc = -ENODEV; 341 rc = -ENODEV;
342 goto out; 342 goto out;
@@ -378,7 +378,7 @@ retry:
378 } 378 }
379 379
380 if (result) { 380 if (result) {
381 dev_err(dev, "%s: command failed, result=0x%x\n", 381 dev_err(dev, "%s: command failed, result=%08x\n",
382 __func__, result); 382 __func__, result);
383 rc = -EIO; 383 rc = -EIO;
384 goto out; 384 goto out;
@@ -415,29 +415,32 @@ out:
415struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl, 415struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
416 struct llun_info *lli) 416 struct llun_info *lli)
417{ 417{
418 struct cxlflash_cfg *cfg = ctxi->cfg;
419 struct device *dev = &cfg->dev->dev;
418 struct sisl_rht_entry *rhte = NULL; 420 struct sisl_rht_entry *rhte = NULL;
419 421
420 if (unlikely(!ctxi->rht_start)) { 422 if (unlikely(!ctxi->rht_start)) {
421 pr_debug("%s: Context does not have allocated RHT!\n", 423 dev_dbg(dev, "%s: Context does not have allocated RHT\n",
422 __func__); 424 __func__);
423 goto out; 425 goto out;
424 } 426 }
425 427
426 if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) { 428 if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
427 pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl); 429 dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
430 __func__, rhndl);
428 goto out; 431 goto out;
429 } 432 }
430 433
431 if (unlikely(ctxi->rht_lun[rhndl] != lli)) { 434 if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
432 pr_debug("%s: Bad resource handle LUN! (%d)\n", 435 dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
433 __func__, rhndl); 436 __func__, rhndl);
434 goto out; 437 goto out;
435 } 438 }
436 439
437 rhte = &ctxi->rht_start[rhndl]; 440 rhte = &ctxi->rht_start[rhndl];
438 if (unlikely(rhte->nmask == 0)) { 441 if (unlikely(rhte->nmask == 0)) {
439 pr_debug("%s: Unopened resource handle! (%d)\n", 442 dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
440 __func__, rhndl); 443 __func__, rhndl);
441 rhte = NULL; 444 rhte = NULL;
442 goto out; 445 goto out;
443 } 446 }
@@ -456,6 +459,8 @@ out:
456struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi, 459struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
457 struct llun_info *lli) 460 struct llun_info *lli)
458{ 461{
462 struct cxlflash_cfg *cfg = ctxi->cfg;
463 struct device *dev = &cfg->dev->dev;
459 struct sisl_rht_entry *rhte = NULL; 464 struct sisl_rht_entry *rhte = NULL;
460 int i; 465 int i;
461 466
@@ -470,7 +475,7 @@ struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
470 if (likely(rhte)) 475 if (likely(rhte))
471 ctxi->rht_lun[i] = lli; 476 ctxi->rht_lun[i] = lli;
472 477
473 pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i); 478 dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
474 return rhte; 479 return rhte;
475} 480}
476 481
@@ -547,7 +552,7 @@ int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
547 if (gli->mode == MODE_NONE) 552 if (gli->mode == MODE_NONE)
548 gli->mode = mode; 553 gli->mode = mode;
549 else if (gli->mode != mode) { 554 else if (gli->mode != mode) {
550 pr_debug("%s: LUN operating in mode %d, requested mode %d\n", 555 pr_debug("%s: gli_mode=%d requested_mode=%d\n",
551 __func__, gli->mode, mode); 556 __func__, gli->mode, mode);
552 rc = -EINVAL; 557 rc = -EINVAL;
553 goto out; 558 goto out;
@@ -605,7 +610,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
605 struct ctx_info *ctxi, 610 struct ctx_info *ctxi,
606 struct dk_cxlflash_release *release) 611 struct dk_cxlflash_release *release)
607{ 612{
608 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 613 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
609 struct device *dev = &cfg->dev->dev; 614 struct device *dev = &cfg->dev->dev;
610 struct llun_info *lli = sdev->hostdata; 615 struct llun_info *lli = sdev->hostdata;
611 struct glun_info *gli = lli->parent; 616 struct glun_info *gli = lli->parent;
@@ -622,13 +627,13 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
622 struct sisl_rht_entry *rhte; 627 struct sisl_rht_entry *rhte;
623 struct sisl_rht_entry_f1 *rhte_f1; 628 struct sisl_rht_entry_f1 *rhte_f1;
624 629
625 dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n", 630 dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
626 __func__, ctxid, release->rsrc_handle, gli->mode, gli->users); 631 __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
627 632
628 if (!ctxi) { 633 if (!ctxi) {
629 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 634 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
630 if (unlikely(!ctxi)) { 635 if (unlikely(!ctxi)) {
631 dev_dbg(dev, "%s: Bad context! (%llu)\n", 636 dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
632 __func__, ctxid); 637 __func__, ctxid);
633 rc = -EINVAL; 638 rc = -EINVAL;
634 goto out; 639 goto out;
@@ -639,7 +644,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
639 644
640 rhte = get_rhte(ctxi, rhndl, lli); 645 rhte = get_rhte(ctxi, rhndl, lli);
641 if (unlikely(!rhte)) { 646 if (unlikely(!rhte)) {
642 dev_dbg(dev, "%s: Bad resource handle! (%d)\n", 647 dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
643 __func__, rhndl); 648 __func__, rhndl);
644 rc = -EINVAL; 649 rc = -EINVAL;
645 goto out; 650 goto out;
@@ -758,13 +763,13 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
758 lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL); 763 lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
759 ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL); 764 ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
760 if (unlikely(!ctxi || !lli || !ws)) { 765 if (unlikely(!ctxi || !lli || !ws)) {
761 dev_err(dev, "%s: Unable to allocate context!\n", __func__); 766 dev_err(dev, "%s: Unable to allocate context\n", __func__);
762 goto err; 767 goto err;
763 } 768 }
764 769
765 rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL); 770 rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
766 if (unlikely(!rhte)) { 771 if (unlikely(!rhte)) {
767 dev_err(dev, "%s: Unable to allocate RHT!\n", __func__); 772 dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
768 goto err; 773 goto err;
769 } 774 }
770 775
@@ -858,7 +863,7 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
858 struct ctx_info *ctxi, 863 struct ctx_info *ctxi,
859 struct dk_cxlflash_detach *detach) 864 struct dk_cxlflash_detach *detach)
860{ 865{
861 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 866 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
862 struct device *dev = &cfg->dev->dev; 867 struct device *dev = &cfg->dev->dev;
863 struct llun_info *lli = sdev->hostdata; 868 struct llun_info *lli = sdev->hostdata;
864 struct lun_access *lun_access, *t; 869 struct lun_access *lun_access, *t;
@@ -875,7 +880,7 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
875 if (!ctxi) { 880 if (!ctxi) {
876 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 881 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
877 if (unlikely(!ctxi)) { 882 if (unlikely(!ctxi)) {
878 dev_dbg(dev, "%s: Bad context! (%llu)\n", 883 dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
879 __func__, ctxid); 884 __func__, ctxid);
880 rc = -EINVAL; 885 rc = -EINVAL;
881 goto out; 886 goto out;
@@ -964,7 +969,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
964 969
965 ctxid = cxl_process_element(ctx); 970 ctxid = cxl_process_element(ctx);
966 if (unlikely(ctxid < 0)) { 971 if (unlikely(ctxid < 0)) {
967 dev_err(dev, "%s: Context %p was closed! (%d)\n", 972 dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
968 __func__, ctx, ctxid); 973 __func__, ctx, ctxid);
969 goto out; 974 goto out;
970 } 975 }
@@ -973,18 +978,18 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
973 if (unlikely(!ctxi)) { 978 if (unlikely(!ctxi)) {
974 ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE); 979 ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
975 if (!ctxi) { 980 if (!ctxi) {
976 dev_dbg(dev, "%s: Context %d already free!\n", 981 dev_dbg(dev, "%s: ctxid=%d already free\n",
977 __func__, ctxid); 982 __func__, ctxid);
978 goto out_release; 983 goto out_release;
979 } 984 }
980 985
981 dev_dbg(dev, "%s: Another process owns context %d!\n", 986 dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
982 __func__, ctxid); 987 __func__, ctxid);
983 put_context(ctxi); 988 put_context(ctxi);
984 goto out; 989 goto out;
985 } 990 }
986 991
987 dev_dbg(dev, "%s: close for context %d\n", __func__, ctxid); 992 dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
988 993
989 detach.context_id = ctxi->ctxid; 994 detach.context_id = ctxi->ctxid;
990 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) 995 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
@@ -1011,17 +1016,20 @@ static void unmap_context(struct ctx_info *ctxi)
1011 1016
1012/** 1017/**
1013 * get_err_page() - obtains and allocates the error notification page 1018 * get_err_page() - obtains and allocates the error notification page
1019 * @cfg: Internal structure associated with the host.
1014 * 1020 *
1015 * Return: error notification page on success, NULL on failure 1021 * Return: error notification page on success, NULL on failure
1016 */ 1022 */
1017static struct page *get_err_page(void) 1023static struct page *get_err_page(struct cxlflash_cfg *cfg)
1018{ 1024{
1019 struct page *err_page = global.err_page; 1025 struct page *err_page = global.err_page;
1026 struct device *dev = &cfg->dev->dev;
1020 1027
1021 if (unlikely(!err_page)) { 1028 if (unlikely(!err_page)) {
1022 err_page = alloc_page(GFP_KERNEL); 1029 err_page = alloc_page(GFP_KERNEL);
1023 if (unlikely(!err_page)) { 1030 if (unlikely(!err_page)) {
1024 pr_err("%s: Unable to allocate err_page!\n", __func__); 1031 dev_err(dev, "%s: Unable to allocate err_page\n",
1032 __func__);
1025 goto out; 1033 goto out;
1026 } 1034 }
1027 1035
@@ -1039,7 +1047,7 @@ static struct page *get_err_page(void)
1039 } 1047 }
1040 1048
1041out: 1049out:
1042 pr_debug("%s: returning err_page=%p\n", __func__, err_page); 1050 dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
1043 return err_page; 1051 return err_page;
1044} 1052}
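get_err_page() caches a single global page; the publish step (not visible in this hunk) presumably follows the usual allocate-outside-the-lock pattern, freeing the duplicate when two callers race:

    mutex_lock(&global.mutex);
    if (likely(!global.err_page))
            global.err_page = err_page;     /* publish our freshly allocated page */
    else {
            __free_page(err_page);          /* lost the race; use the winner's */
            err_page = global.err_page;
    }
    mutex_unlock(&global.mutex);
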
1045 1053
@@ -1074,14 +1082,14 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1074 1082
1075 ctxid = cxl_process_element(ctx); 1083 ctxid = cxl_process_element(ctx);
1076 if (unlikely(ctxid < 0)) { 1084 if (unlikely(ctxid < 0)) {
1077 dev_err(dev, "%s: Context %p was closed! (%d)\n", 1085 dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
1078 __func__, ctx, ctxid); 1086 __func__, ctx, ctxid);
1079 goto err; 1087 goto err;
1080 } 1088 }
1081 1089
1082 ctxi = get_context(cfg, ctxid, file, ctrl); 1090 ctxi = get_context(cfg, ctxid, file, ctrl);
1083 if (unlikely(!ctxi)) { 1091 if (unlikely(!ctxi)) {
1084 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid); 1092 dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
1085 goto err; 1093 goto err;
1086 } 1094 }
1087 1095
@@ -1091,13 +1099,12 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1091 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1099 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1092 rc = ctxi->cxl_mmap_vmops->fault(vma, vmf); 1100 rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
1093 } else { 1101 } else {
1094 dev_dbg(dev, "%s: err recovery active, use err_page!\n", 1102 dev_dbg(dev, "%s: err recovery active, use err_page\n",
1095 __func__); 1103 __func__);
1096 1104
1097 err_page = get_err_page(); 1105 err_page = get_err_page(cfg);
1098 if (unlikely(!err_page)) { 1106 if (unlikely(!err_page)) {
1099 dev_err(dev, "%s: Could not obtain error page!\n", 1107 dev_err(dev, "%s: Could not get err_page\n", __func__);
1100 __func__);
1101 rc = VM_FAULT_RETRY; 1108 rc = VM_FAULT_RETRY;
1102 goto out; 1109 goto out;
1103 } 1110 }
@@ -1147,7 +1154,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
1147 1154
1148 ctxid = cxl_process_element(ctx); 1155 ctxid = cxl_process_element(ctx);
1149 if (unlikely(ctxid < 0)) { 1156 if (unlikely(ctxid < 0)) {
1150 dev_err(dev, "%s: Context %p was closed! (%d)\n", 1157 dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
1151 __func__, ctx, ctxid); 1158 __func__, ctx, ctxid);
1152 rc = -EIO; 1159 rc = -EIO;
1153 goto out; 1160 goto out;
@@ -1155,7 +1162,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
1155 1162
1156 ctxi = get_context(cfg, ctxid, file, ctrl); 1163 ctxi = get_context(cfg, ctxid, file, ctrl);
1157 if (unlikely(!ctxi)) { 1164 if (unlikely(!ctxi)) {
1158 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid); 1165 dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
1159 rc = -EIO; 1166 rc = -EIO;
1160 goto out; 1167 goto out;
1161 } 1168 }
@@ -1251,7 +1258,7 @@ retry:
1251 break; 1258 break;
1252 goto retry; 1259 goto retry;
1253 case STATE_FAILTERM: 1260 case STATE_FAILTERM:
1254 dev_dbg(dev, "%s: Failed/Terminating!\n", __func__); 1261 dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
1255 rc = -ENODEV; 1262 rc = -ENODEV;
1256 break; 1263 break;
1257 default: 1264 default:
@@ -1276,7 +1283,7 @@ retry:
1276static int cxlflash_disk_attach(struct scsi_device *sdev, 1283static int cxlflash_disk_attach(struct scsi_device *sdev,
1277 struct dk_cxlflash_attach *attach) 1284 struct dk_cxlflash_attach *attach)
1278{ 1285{
1279 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1286 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1280 struct device *dev = &cfg->dev->dev; 1287 struct device *dev = &cfg->dev->dev;
1281 struct afu *afu = cfg->afu; 1288 struct afu *afu = cfg->afu;
1282 struct llun_info *lli = sdev->hostdata; 1289 struct llun_info *lli = sdev->hostdata;
@@ -1287,6 +1294,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1287 int rc = 0; 1294 int rc = 0;
1288 u32 perms; 1295 u32 perms;
1289 int ctxid = -1; 1296 int ctxid = -1;
1297 u64 flags = 0UL;
1290 u64 rctxid = 0UL; 1298 u64 rctxid = 0UL;
1291 struct file *file = NULL; 1299 struct file *file = NULL;
1292 1300
@@ -1302,24 +1310,24 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1302 } 1310 }
1303 1311
1304 if (gli->max_lba == 0) { 1312 if (gli->max_lba == 0) {
1305 dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n", 1313 dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
1306 __func__, lli->lun_id[sdev->channel]); 1314 __func__, lli->lun_id[sdev->channel]);
1307 rc = read_cap16(sdev, lli); 1315 rc = read_cap16(sdev, lli);
1308 if (rc) { 1316 if (rc) {
1309 dev_err(dev, "%s: Invalid device! (%d)\n", 1317 dev_err(dev, "%s: Invalid device rc=%d\n",
1310 __func__, rc); 1318 __func__, rc);
1311 rc = -ENODEV; 1319 rc = -ENODEV;
1312 goto out; 1320 goto out;
1313 } 1321 }
1314 dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba); 1322 dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
1315 dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len); 1323 dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
1316 } 1324 }
1317 1325
1318 if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) { 1326 if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
1319 rctxid = attach->context_id; 1327 rctxid = attach->context_id;
1320 ctxi = get_context(cfg, rctxid, NULL, 0); 1328 ctxi = get_context(cfg, rctxid, NULL, 0);
1321 if (!ctxi) { 1329 if (!ctxi) {
1322 dev_dbg(dev, "%s: Bad context! (%016llX)\n", 1330 dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
1323 __func__, rctxid); 1331 __func__, rctxid);
1324 rc = -EINVAL; 1332 rc = -EINVAL;
1325 goto out; 1333 goto out;
@@ -1327,7 +1335,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1327 1335
1328 list_for_each_entry(lun_access, &ctxi->luns, list) 1336 list_for_each_entry(lun_access, &ctxi->luns, list)
1329 if (lun_access->lli == lli) { 1337 if (lun_access->lli == lli) {
1330 dev_dbg(dev, "%s: Already attached!\n", 1338 dev_dbg(dev, "%s: Already attached\n",
1331 __func__); 1339 __func__);
1332 rc = -EINVAL; 1340 rc = -EINVAL;
1333 goto out; 1341 goto out;
@@ -1336,13 +1344,13 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1336 1344
1337 rc = scsi_device_get(sdev); 1345 rc = scsi_device_get(sdev);
1338 if (unlikely(rc)) { 1346 if (unlikely(rc)) {
1339 dev_err(dev, "%s: Unable to get sdev reference!\n", __func__); 1347 dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
1340 goto out; 1348 goto out;
1341 } 1349 }
1342 1350
1343 lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL); 1351 lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
1344 if (unlikely(!lun_access)) { 1352 if (unlikely(!lun_access)) {
1345 dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__); 1353 dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
1346 rc = -ENOMEM; 1354 rc = -ENOMEM;
1347 goto err; 1355 goto err;
1348 } 1356 }
@@ -1352,7 +1360,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1352 1360
1353 /* Non-NULL context indicates reuse (another context reference) */ 1361 /* Non-NULL context indicates reuse (another context reference) */
1354 if (ctxi) { 1362 if (ctxi) {
1355 dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n", 1363 dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
1356 __func__, rctxid); 1364 __func__, rctxid);
1357 kref_get(&ctxi->kref); 1365 kref_get(&ctxi->kref);
1358 list_add(&lun_access->list, &ctxi->luns); 1366 list_add(&lun_access->list, &ctxi->luns);
@@ -1361,7 +1369,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1361 1369
1362 ctxi = create_context(cfg); 1370 ctxi = create_context(cfg);
1363 if (unlikely(!ctxi)) { 1371 if (unlikely(!ctxi)) {
1364 dev_err(dev, "%s: Failed to create context! (%d)\n", 1372 dev_err(dev, "%s: Failed to create context ctxid=%d\n",
1365 __func__, ctxid); 1373 __func__, ctxid);
1366 goto err; 1374 goto err;
1367 } 1375 }
@@ -1387,7 +1395,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1387 1395
1388 ctxid = cxl_process_element(ctx); 1396 ctxid = cxl_process_element(ctx);
1389 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { 1397 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1390 dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid); 1398 dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
1391 rc = -EPERM; 1399 rc = -EPERM;
1392 goto err; 1400 goto err;
1393 } 1401 }
@@ -1426,10 +1434,11 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1426 1434
1427out_attach: 1435out_attach:
1428 if (fd != -1) 1436 if (fd != -1)
1429 attach->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD; 1437 flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
1430 else 1438 if (afu_is_sq_cmd_mode(afu))
1431 attach->hdr.return_flags = 0; 1439 flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
1432 1440
1441 attach->hdr.return_flags = flags;
1433 attach->context_id = ctxi->ctxid; 1442 attach->context_id = ctxi->ctxid;
1434 attach->block_size = gli->blk_len; 1443 attach->block_size = gli->blk_len;
1435 attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea); 1444 attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
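The out_attach rework swaps the if/else assignment of hdr.return_flags for a local accumulator, so independent flags compose without clobbering one another; the recover path below gets the same shape:

    u64 flags = 0;

    if (fd != -1)
            flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
    if (afu_is_sq_cmd_mode(afu))
            flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

    attach->hdr.return_flags = flags;       /* single store at the end */
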
@@ -1520,7 +1529,7 @@ static int recover_context(struct cxlflash_cfg *cfg,
1520 1529
1521 ctxid = cxl_process_element(ctx); 1530 ctxid = cxl_process_element(ctx);
1522 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { 1531 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1523 dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid); 1532 dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
1524 rc = -EPERM; 1533 rc = -EPERM;
1525 goto err2; 1534 goto err2;
1526 } 1535 }
@@ -1611,12 +1620,13 @@ err1:
1611static int cxlflash_afu_recover(struct scsi_device *sdev, 1620static int cxlflash_afu_recover(struct scsi_device *sdev,
1612 struct dk_cxlflash_recover_afu *recover) 1621 struct dk_cxlflash_recover_afu *recover)
1613{ 1622{
1614 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1623 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1615 struct device *dev = &cfg->dev->dev; 1624 struct device *dev = &cfg->dev->dev;
1616 struct llun_info *lli = sdev->hostdata; 1625 struct llun_info *lli = sdev->hostdata;
1617 struct afu *afu = cfg->afu; 1626 struct afu *afu = cfg->afu;
1618 struct ctx_info *ctxi = NULL; 1627 struct ctx_info *ctxi = NULL;
1619 struct mutex *mutex = &cfg->ctx_recovery_mutex; 1628 struct mutex *mutex = &cfg->ctx_recovery_mutex;
1629 u64 flags;
1620 u64 ctxid = DECODE_CTXID(recover->context_id), 1630 u64 ctxid = DECODE_CTXID(recover->context_id),
1621 rctxid = recover->context_id; 1631 rctxid = recover->context_id;
1622 long reg; 1632 long reg;
@@ -1632,19 +1642,19 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
1632 goto out; 1642 goto out;
1633 rc = check_state(cfg); 1643 rc = check_state(cfg);
1634 if (rc) { 1644 if (rc) {
1635 dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc); 1645 dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
1636 rc = -ENODEV; 1646 rc = -ENODEV;
1637 goto out; 1647 goto out;
1638 } 1648 }
1639 1649
1640 dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n", 1650 dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
1641 __func__, recover->reason, rctxid); 1651 __func__, recover->reason, rctxid);
1642 1652
1643retry: 1653retry:
1644 /* Ensure that this process is attached to the context */ 1654 /* Ensure that this process is attached to the context */
1645 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 1655 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
1646 if (unlikely(!ctxi)) { 1656 if (unlikely(!ctxi)) {
1647 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); 1657 dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1648 rc = -EINVAL; 1658 rc = -EINVAL;
1649 goto out; 1659 goto out;
1650 } 1660 }
@@ -1653,12 +1663,12 @@ retry:
1653retry_recover: 1663retry_recover:
1654 rc = recover_context(cfg, ctxi, &new_adap_fd); 1664 rc = recover_context(cfg, ctxi, &new_adap_fd);
1655 if (unlikely(rc)) { 1665 if (unlikely(rc)) {
1656 dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n", 1666 dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
1657 __func__, ctxid, rc); 1667 __func__, ctxid, rc);
1658 if ((rc == -ENODEV) && 1668 if ((rc == -ENODEV) &&
1659 ((atomic_read(&cfg->recovery_threads) > 1) || 1669 ((atomic_read(&cfg->recovery_threads) > 1) ||
1660 (lretry--))) { 1670 (lretry--))) {
1661 dev_dbg(dev, "%s: Going to try again!\n", 1671 dev_dbg(dev, "%s: Going to try again\n",
1662 __func__); 1672 __func__);
1663 mutex_unlock(mutex); 1673 mutex_unlock(mutex);
1664 msleep(100); 1674 msleep(100);
@@ -1672,11 +1682,16 @@ retry_recover:
1672 } 1682 }
1673 1683
1674 ctxi->err_recovery_active = false; 1684 ctxi->err_recovery_active = false;
1685
1686 flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
1687 DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
1688 if (afu_is_sq_cmd_mode(afu))
1689 flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
1690
1691 recover->hdr.return_flags = flags;
1675 recover->context_id = ctxi->ctxid; 1692 recover->context_id = ctxi->ctxid;
1676 recover->adap_fd = new_adap_fd; 1693 recover->adap_fd = new_adap_fd;
1677 recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea); 1694 recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1678 recover->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
1679 DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
1680 goto out; 1695 goto out;
1681 } 1696 }
1682 1697
@@ -1699,7 +1714,7 @@ retry_recover:
1699 goto retry; 1714 goto retry;
1700 } 1715 }
1701 1716
1702 dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__); 1717 dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
1703out: 1718out:
1704 if (likely(ctxi)) 1719 if (likely(ctxi))
1705 put_context(ctxi); 1720 put_context(ctxi);
@@ -1718,7 +1733,7 @@ out:
1718static int process_sense(struct scsi_device *sdev, 1733static int process_sense(struct scsi_device *sdev,
1719 struct dk_cxlflash_verify *verify) 1734 struct dk_cxlflash_verify *verify)
1720{ 1735{
1721 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1736 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1722 struct device *dev = &cfg->dev->dev; 1737 struct device *dev = &cfg->dev->dev;
1723 struct llun_info *lli = sdev->hostdata; 1738 struct llun_info *lli = sdev->hostdata;
1724 struct glun_info *gli = lli->parent; 1739 struct glun_info *gli = lli->parent;
@@ -1729,7 +1744,7 @@ static int process_sense(struct scsi_device *sdev,
1729 rc = scsi_normalize_sense((const u8 *)&verify->sense_data, 1744 rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
1730 DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr); 1745 DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
1731 if (!rc) { 1746 if (!rc) {
1732 dev_err(dev, "%s: Failed to normalize sense data!\n", __func__); 1747 dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
1733 rc = -EINVAL; 1748 rc = -EINVAL;
1734 goto out; 1749 goto out;
1735 } 1750 }
@@ -1785,7 +1800,7 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
1785{ 1800{
1786 int rc = 0; 1801 int rc = 0;
1787 struct ctx_info *ctxi = NULL; 1802 struct ctx_info *ctxi = NULL;
1788 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1803 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1789 struct device *dev = &cfg->dev->dev; 1804 struct device *dev = &cfg->dev->dev;
1790 struct llun_info *lli = sdev->hostdata; 1805 struct llun_info *lli = sdev->hostdata;
1791 struct glun_info *gli = lli->parent; 1806 struct glun_info *gli = lli->parent;
@@ -1795,20 +1810,20 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
1795 rctxid = verify->context_id; 1810 rctxid = verify->context_id;
1796 u64 last_lba = 0; 1811 u64 last_lba = 0;
1797 1812
1798 dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, " 1813 dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
1799 "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle, 1814 "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
1800 verify->hint, verify->hdr.flags); 1815 verify->hint, verify->hdr.flags);
1801 1816
1802 ctxi = get_context(cfg, rctxid, lli, 0); 1817 ctxi = get_context(cfg, rctxid, lli, 0);
1803 if (unlikely(!ctxi)) { 1818 if (unlikely(!ctxi)) {
1804 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); 1819 dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1805 rc = -EINVAL; 1820 rc = -EINVAL;
1806 goto out; 1821 goto out;
1807 } 1822 }
1808 1823
1809 rhte = get_rhte(ctxi, rhndl, lli); 1824 rhte = get_rhte(ctxi, rhndl, lli);
1810 if (unlikely(!rhte)) { 1825 if (unlikely(!rhte)) {
1811 dev_dbg(dev, "%s: Bad resource handle! (%d)\n", 1826 dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
1812 __func__, rhndl); 1827 __func__, rhndl);
1813 rc = -EINVAL; 1828 rc = -EINVAL;
1814 goto out; 1829 goto out;
@@ -1855,7 +1870,7 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
1855out: 1870out:
1856 if (likely(ctxi)) 1871 if (likely(ctxi))
1857 put_context(ctxi); 1872 put_context(ctxi);
1858 dev_dbg(dev, "%s: returning rc=%d llba=%llX\n", 1873 dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
1859 __func__, rc, verify->last_lba); 1874 __func__, rc, verify->last_lba);
1860 return rc; 1875 return rc;
1861} 1876}
@@ -1907,7 +1922,7 @@ static char *decode_ioctl(int cmd)
1907 */ 1922 */
1908static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) 1923static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
1909{ 1924{
1910 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1925 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1911 struct device *dev = &cfg->dev->dev; 1926 struct device *dev = &cfg->dev->dev;
1912 struct afu *afu = cfg->afu; 1927 struct afu *afu = cfg->afu;
1913 struct llun_info *lli = sdev->hostdata; 1928 struct llun_info *lli = sdev->hostdata;
@@ -1927,25 +1942,25 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
1927 struct ctx_info *ctxi = NULL; 1942 struct ctx_info *ctxi = NULL;
1928 struct sisl_rht_entry *rhte = NULL; 1943 struct sisl_rht_entry *rhte = NULL;
1929 1944
1930 pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size); 1945 dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
1931 1946
1932 rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false); 1947 rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
1933 if (unlikely(rc)) { 1948 if (unlikely(rc)) {
1934 dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n", 1949 dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
1935 __func__);
1936 goto out; 1950 goto out;
1937 } 1951 }
1938 1952
1939 ctxi = get_context(cfg, rctxid, lli, 0); 1953 ctxi = get_context(cfg, rctxid, lli, 0);
1940 if (unlikely(!ctxi)) { 1954 if (unlikely(!ctxi)) {
1941 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); 1955 dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1942 rc = -EINVAL; 1956 rc = -EINVAL;
1943 goto err1; 1957 goto err1;
1944 } 1958 }
1945 1959
1946 rhte = rhte_checkout(ctxi, lli); 1960 rhte = rhte_checkout(ctxi, lli);
1947 if (unlikely(!rhte)) { 1961 if (unlikely(!rhte)) {
1948 dev_dbg(dev, "%s: too many opens for this context\n", __func__); 1962 dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
1963 __func__, ctxid);
1949 rc = -EMFILE; /* too many opens */ 1964 rc = -EMFILE; /* too many opens */
1950 goto err1; 1965 goto err1;
1951 } 1966 }
@@ -1963,7 +1978,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
1963out: 1978out:
1964 if (likely(ctxi)) 1979 if (likely(ctxi))
1965 put_context(ctxi); 1980 put_context(ctxi);
1966 dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n", 1981 dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
1967 __func__, rsrc_handle, rc, last_lba); 1982 __func__, rsrc_handle, rc, last_lba);
1968 return rc; 1983 return rc;
1969 1984
@@ -1985,7 +2000,7 @@ err1:
1985 */ 2000 */
1986static int ioctl_common(struct scsi_device *sdev, int cmd) 2001static int ioctl_common(struct scsi_device *sdev, int cmd)
1987{ 2002{
1988 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 2003 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1989 struct device *dev = &cfg->dev->dev; 2004 struct device *dev = &cfg->dev->dev;
1990 struct llun_info *lli = sdev->hostdata; 2005 struct llun_info *lli = sdev->hostdata;
1991 int rc = 0; 2006 int rc = 0;
@@ -2002,7 +2017,7 @@ static int ioctl_common(struct scsi_device *sdev, int cmd)
2002 case DK_CXLFLASH_VLUN_RESIZE: 2017 case DK_CXLFLASH_VLUN_RESIZE:
2003 case DK_CXLFLASH_RELEASE: 2018 case DK_CXLFLASH_RELEASE:
2004 case DK_CXLFLASH_DETACH: 2019 case DK_CXLFLASH_DETACH:
2005 dev_dbg(dev, "%s: Command override! (%d)\n", 2020 dev_dbg(dev, "%s: Command override rc=%d\n",
2006 __func__, rc); 2021 __func__, rc);
2007 rc = 0; 2022 rc = 0;
2008 break; 2023 break;
@@ -2032,7 +2047,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
2032{ 2047{
2033 typedef int (*sioctl) (struct scsi_device *, void *); 2048 typedef int (*sioctl) (struct scsi_device *, void *);
2034 2049
2035 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 2050 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
2036 struct device *dev = &cfg->dev->dev; 2051 struct device *dev = &cfg->dev->dev;
2037 struct afu *afu = cfg->afu; 2052 struct afu *afu = cfg->afu;
2038 struct dk_cxlflash_hdr *hdr; 2053 struct dk_cxlflash_hdr *hdr;
@@ -2111,7 +2126,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
2111 } 2126 }
2112 2127
2113 if (unlikely(copy_from_user(&buf, arg, size))) { 2128 if (unlikely(copy_from_user(&buf, arg, size))) {
2114 dev_err(dev, "%s: copy_from_user() fail! " 2129 dev_err(dev, "%s: copy_from_user() fail "
2115 "size=%lu cmd=%d (%s) arg=%p\n", 2130 "size=%lu cmd=%d (%s) arg=%p\n",
2116 __func__, size, cmd, decode_ioctl(cmd), arg); 2131 __func__, size, cmd, decode_ioctl(cmd), arg);
2117 rc = -EFAULT; 2132 rc = -EFAULT;
@@ -2127,7 +2142,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
2127 } 2142 }
2128 2143
2129 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) { 2144 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
2130 dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__); 2145 dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
2131 rc = -EINVAL; 2146 rc = -EINVAL;
2132 goto cxlflash_ioctl_exit; 2147 goto cxlflash_ioctl_exit;
2133 } 2148 }
@@ -2135,7 +2150,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
2135 rc = do_ioctl(sdev, (void *)&buf); 2150 rc = do_ioctl(sdev, (void *)&buf);
2136 if (likely(!rc)) 2151 if (likely(!rc))
2137 if (unlikely(copy_to_user(arg, &buf, size))) { 2152 if (unlikely(copy_to_user(arg, &buf, size))) {
2138 dev_err(dev, "%s: copy_to_user() fail! " 2153 dev_err(dev, "%s: copy_to_user() fail "
2139 "size=%lu cmd=%d (%s) arg=%p\n", 2154 "size=%lu cmd=%d (%s) arg=%p\n",
2140 __func__, size, cmd, decode_ioctl(cmd), arg); 2155 __func__, size, cmd, decode_ioctl(cmd), arg);
2141 rc = -EFAULT; 2156 rc = -EFAULT;
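
A note on the recurring change in the cxlflash/superpipe.c hunks above: every open-coded (struct cxlflash_cfg *)sdev->host->hostdata cast becomes shost_priv(sdev->host). shost_priv() is a one-line inline in <scsi/scsi_host.h> that returns shost->hostdata as a void *, so both forms yield the same pointer; the helper only removes the cast noise. A minimal sketch of the idiom, with my_cfg standing in for the driver's private type:

    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    struct my_cfg {                         /* hypothetical private data */
            int state;
    };

    static struct my_cfg *cfg_from_sdev(struct scsi_device *sdev)
    {
            /* before: return (struct my_cfg *)sdev->host->hostdata; */
            return shost_priv(sdev->host);  /* same pointer, no cast */
    }
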
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 90c5d7f5278e..8fcc804dbef9 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -66,8 +66,8 @@ static int ba_init(struct ba_lun *ba_lun)
66 int last_word_underflow = 0; 66 int last_word_underflow = 0;
67 u64 *lam; 67 u64 *lam;
68 68
69 pr_debug("%s: Initializing LUN: lun_id = %llX, " 69 pr_debug("%s: Initializing LUN: lun_id=%016llx "
70 "ba_lun->lsize = %lX, ba_lun->au_size = %lX\n", 70 "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
71 __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size); 71 __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
72 72
73 /* Calculate bit map size */ 73 /* Calculate bit map size */
@@ -80,7 +80,7 @@ static int ba_init(struct ba_lun *ba_lun)
80 /* Allocate lun information container */ 80 /* Allocate lun information container */
81 bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL); 81 bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
82 if (unlikely(!bali)) { 82 if (unlikely(!bali)) {
83 pr_err("%s: Failed to allocate lun_info for lun_id %llX\n", 83 pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
84 __func__, ba_lun->lun_id); 84 __func__, ba_lun->lun_id);
85 return -ENOMEM; 85 return -ENOMEM;
86 } 86 }
@@ -96,7 +96,7 @@ static int ba_init(struct ba_lun *ba_lun)
96 GFP_KERNEL); 96 GFP_KERNEL);
97 if (unlikely(!bali->lun_alloc_map)) { 97 if (unlikely(!bali->lun_alloc_map)) {
98 pr_err("%s: Failed to allocate lun allocation map: " 98 pr_err("%s: Failed to allocate lun allocation map: "
99 "lun_id = %llX\n", __func__, ba_lun->lun_id); 99 "lun_id=%016llx\n", __func__, ba_lun->lun_id);
100 kfree(bali); 100 kfree(bali);
101 return -ENOMEM; 101 return -ENOMEM;
102 } 102 }
@@ -125,7 +125,7 @@ static int ba_init(struct ba_lun *ba_lun)
125 bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)), 125 bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
126 GFP_KERNEL); 126 GFP_KERNEL);
127 if (unlikely(!bali->aun_clone_map)) { 127 if (unlikely(!bali->aun_clone_map)) {
128 pr_err("%s: Failed to allocate clone map: lun_id = %llX\n", 128 pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
129 __func__, ba_lun->lun_id); 129 __func__, ba_lun->lun_id);
130 kfree(bali->lun_alloc_map); 130 kfree(bali->lun_alloc_map);
131 kfree(bali); 131 kfree(bali);
@@ -136,7 +136,7 @@ static int ba_init(struct ba_lun *ba_lun)
136 ba_lun->ba_lun_handle = bali; 136 ba_lun->ba_lun_handle = bali;
137 137
138 pr_debug("%s: Successfully initialized the LUN: " 138 pr_debug("%s: Successfully initialized the LUN: "
139 "lun_id = %llX, bitmap size = %X, free_aun_cnt = %llX\n", 139 "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
140 __func__, ba_lun->lun_id, bali->lun_bmap_size, 140 __func__, ba_lun->lun_id, bali->lun_bmap_size,
141 bali->free_aun_cnt); 141 bali->free_aun_cnt);
142 return 0; 142 return 0;
@@ -165,10 +165,9 @@ static int find_free_range(u32 low,
165 num_bits = (sizeof(*lam) * BITS_PER_BYTE); 165 num_bits = (sizeof(*lam) * BITS_PER_BYTE);
166 bit_pos = find_first_bit(lam, num_bits); 166 bit_pos = find_first_bit(lam, num_bits);
167 167
168 pr_devel("%s: Found free bit %llX in LUN " 168 pr_devel("%s: Found free bit %llu in LUN "
169 "map entry %llX at bitmap index = %X\n", 169 "map entry %016llx at bitmap index = %d\n",
170 __func__, bit_pos, bali->lun_alloc_map[i], 170 __func__, bit_pos, bali->lun_alloc_map[i], i);
171 i);
172 171
173 *bit_word = i; 172 *bit_word = i;
174 bali->free_aun_cnt--; 173 bali->free_aun_cnt--;
@@ -194,11 +193,11 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
194 bali = ba_lun->ba_lun_handle; 193 bali = ba_lun->ba_lun_handle;
195 194
196 pr_debug("%s: Received block allocation request: " 195 pr_debug("%s: Received block allocation request: "
197 "lun_id = %llX, free_aun_cnt = %llX\n", 196 "lun_id=%016llx free_aun_cnt=%llx\n",
198 __func__, ba_lun->lun_id, bali->free_aun_cnt); 197 __func__, ba_lun->lun_id, bali->free_aun_cnt);
199 198
200 if (bali->free_aun_cnt == 0) { 199 if (bali->free_aun_cnt == 0) {
201 pr_debug("%s: No space left on LUN: lun_id = %llX\n", 200 pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
202 __func__, ba_lun->lun_id); 201 __func__, ba_lun->lun_id);
203 return -1ULL; 202 return -1ULL;
204 } 203 }
@@ -212,7 +211,7 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
212 bali, &bit_word); 211 bali, &bit_word);
213 if (bit_pos == -1) { 212 if (bit_pos == -1) {
214 pr_debug("%s: Could not find an allocation unit on LUN:" 213 pr_debug("%s: Could not find an allocation unit on LUN:"
215 " lun_id = %llX\n", __func__, ba_lun->lun_id); 214 " lun_id=%016llx\n", __func__, ba_lun->lun_id);
216 return -1ULL; 215 return -1ULL;
217 } 216 }
218 } 217 }
@@ -223,8 +222,8 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
223 else 222 else
224 bali->free_curr_idx = bit_word; 223 bali->free_curr_idx = bit_word;
225 224
226 pr_debug("%s: Allocating AU number %llX, on lun_id %llX, " 225 pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
227 "free_aun_cnt = %llX\n", __func__, 226 "free_aun_cnt=%llx\n", __func__,
228 ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id, 227 ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
229 bali->free_aun_cnt); 228 bali->free_aun_cnt);
230 229
@@ -266,18 +265,18 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free)
266 bali = ba_lun->ba_lun_handle; 265 bali = ba_lun->ba_lun_handle;
267 266
268 if (validate_alloc(bali, to_free)) { 267 if (validate_alloc(bali, to_free)) {
269 pr_debug("%s: The AUN %llX is not allocated on lun_id %llX\n", 268 pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
270 __func__, to_free, ba_lun->lun_id); 269 __func__, to_free, ba_lun->lun_id);
271 return -1; 270 return -1;
272 } 271 }
273 272
274 pr_debug("%s: Received a request to free AU %llX on lun_id %llX, " 273 pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
275 "free_aun_cnt = %llX\n", __func__, to_free, ba_lun->lun_id, 274 "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
276 bali->free_aun_cnt); 275 bali->free_aun_cnt);
277 276
278 if (bali->aun_clone_map[to_free] > 0) { 277 if (bali->aun_clone_map[to_free] > 0) {
279 pr_debug("%s: AUN %llX on lun_id %llX has been cloned. Clone " 278 pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
280 "count = %X\n", __func__, to_free, ba_lun->lun_id, 279 __func__, to_free, ba_lun->lun_id,
281 bali->aun_clone_map[to_free]); 280 bali->aun_clone_map[to_free]);
282 bali->aun_clone_map[to_free]--; 281 bali->aun_clone_map[to_free]--;
283 return 0; 282 return 0;
@@ -294,8 +293,8 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free)
294 else if (idx > bali->free_high_idx) 293 else if (idx > bali->free_high_idx)
295 bali->free_high_idx = idx; 294 bali->free_high_idx = idx;
296 295
297 pr_debug("%s: Successfully freed AU at bit_pos %X, bit map index %X on " 296 pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
298 "lun_id %llX, free_aun_cnt = %llX\n", __func__, bit_pos, idx, 297 "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
299 ba_lun->lun_id, bali->free_aun_cnt); 298 ba_lun->lun_id, bali->free_aun_cnt);
300 299
301 return 0; 300 return 0;
@@ -313,16 +312,16 @@ static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
313 struct ba_lun_info *bali = ba_lun->ba_lun_handle; 312 struct ba_lun_info *bali = ba_lun->ba_lun_handle;
314 313
315 if (validate_alloc(bali, to_clone)) { 314 if (validate_alloc(bali, to_clone)) {
316 pr_debug("%s: AUN %llX is not allocated on lun_id %llX\n", 315 pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
317 __func__, to_clone, ba_lun->lun_id); 316 __func__, to_clone, ba_lun->lun_id);
318 return -1; 317 return -1;
319 } 318 }
320 319
321 pr_debug("%s: Received a request to clone AUN %llX on lun_id %llX\n", 320 pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
322 __func__, to_clone, ba_lun->lun_id); 321 __func__, to_clone, ba_lun->lun_id);
323 322
324 if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) { 323 if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
325 pr_debug("%s: AUN %llX on lun_id %llX hit max clones already\n", 324 pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
326 __func__, to_clone, ba_lun->lun_id); 325 __func__, to_clone, ba_lun->lun_id);
327 return -1; 326 return -1;
328 } 327 }
@@ -433,7 +432,7 @@ static int write_same16(struct scsi_device *sdev,
433 u64 offset = lba; 432 u64 offset = lba;
434 int left = nblks; 433 int left = nblks;
435 u32 to = sdev->request_queue->rq_timeout; 434 u32 to = sdev->request_queue->rq_timeout;
436 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 435 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
437 struct device *dev = &cfg->dev->dev; 436 struct device *dev = &cfg->dev->dev;
438 437
439 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); 438 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
@@ -459,7 +458,7 @@ static int write_same16(struct scsi_device *sdev,
459 down_read(&cfg->ioctl_rwsem); 458 down_read(&cfg->ioctl_rwsem);
460 rc = check_state(cfg); 459 rc = check_state(cfg);
461 if (rc) { 460 if (rc) {
462 dev_err(dev, "%s: Failed state! result=0x08%X\n", 461 dev_err(dev, "%s: Failed state result=%08x\n",
463 __func__, result); 462 __func__, result);
464 rc = -ENODEV; 463 rc = -ENODEV;
465 goto out; 464 goto out;
@@ -467,7 +466,7 @@ static int write_same16(struct scsi_device *sdev,
467 466
468 if (result) { 467 if (result) {
469 dev_err_ratelimited(dev, "%s: command failed for " 468 dev_err_ratelimited(dev, "%s: command failed for "
470 "offset %lld result=0x%x\n", 469 "offset=%lld result=%08x\n",
471 __func__, offset, result); 470 __func__, offset, result);
472 rc = -EIO; 471 rc = -EIO;
473 goto out; 472 goto out;
@@ -480,7 +479,7 @@ out:
480 kfree(cmd_buf); 479 kfree(cmd_buf);
481 kfree(scsi_cmd); 480 kfree(scsi_cmd);
482 kfree(sense_buf); 481 kfree(sense_buf);
483 pr_debug("%s: returning rc=%d\n", __func__, rc); 482 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
484 return rc; 483 return rc;
485} 484}
486 485
@@ -508,6 +507,8 @@ static int grow_lxt(struct afu *afu,
508 struct sisl_rht_entry *rhte, 507 struct sisl_rht_entry *rhte,
509 u64 *new_size) 508 u64 *new_size)
510{ 509{
510 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
511 struct device *dev = &cfg->dev->dev;
511 struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL; 512 struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
512 struct llun_info *lli = sdev->hostdata; 513 struct llun_info *lli = sdev->hostdata;
513 struct glun_info *gli = lli->parent; 514 struct glun_info *gli = lli->parent;
@@ -527,7 +528,8 @@ static int grow_lxt(struct afu *afu,
527 mutex_lock(&blka->mutex); 528 mutex_lock(&blka->mutex);
528 av_size = ba_space(&blka->ba_lun); 529 av_size = ba_space(&blka->ba_lun);
529 if (unlikely(av_size <= 0)) { 530 if (unlikely(av_size <= 0)) {
530 pr_debug("%s: ba_space error: av_size %d\n", __func__, av_size); 531 dev_dbg(dev, "%s: ba_space error av_size=%d\n",
532 __func__, av_size);
531 mutex_unlock(&blka->mutex); 533 mutex_unlock(&blka->mutex);
532 rc = -ENOSPC; 534 rc = -ENOSPC;
533 goto out; 535 goto out;
@@ -568,8 +570,8 @@ static int grow_lxt(struct afu *afu,
568 */ 570 */
569 aun = ba_alloc(&blka->ba_lun); 571 aun = ba_alloc(&blka->ba_lun);
570 if ((aun == -1ULL) || (aun >= blka->nchunk)) 572 if ((aun == -1ULL) || (aun >= blka->nchunk))
571 pr_debug("%s: ba_alloc error: allocated chunk# %llX, " 573 dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
572 "max %llX\n", __func__, aun, blka->nchunk - 1); 574 "max=%llu\n", __func__, aun, blka->nchunk - 1);
573 575
574 /* select both ports, use r/w perms from RHT */ 576 /* select both ports, use r/w perms from RHT */
575 lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) | 577 lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
@@ -599,7 +601,7 @@ static int grow_lxt(struct afu *afu,
599 kfree(lxt_old); 601 kfree(lxt_old);
600 *new_size = my_new_size; 602 *new_size = my_new_size;
601out: 603out:
602 pr_debug("%s: returning rc=%d\n", __func__, rc); 604 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
603 return rc; 605 return rc;
604} 606}
605 607
@@ -621,6 +623,8 @@ static int shrink_lxt(struct afu *afu,
621 struct ctx_info *ctxi, 623 struct ctx_info *ctxi,
622 u64 *new_size) 624 u64 *new_size)
623{ 625{
626 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
627 struct device *dev = &cfg->dev->dev;
624 struct sisl_lxt_entry *lxt, *lxt_old; 628 struct sisl_lxt_entry *lxt, *lxt_old;
625 struct llun_info *lli = sdev->hostdata; 629 struct llun_info *lli = sdev->hostdata;
626 struct glun_info *gli = lli->parent; 630 struct glun_info *gli = lli->parent;
@@ -706,7 +710,7 @@ static int shrink_lxt(struct afu *afu,
706 kfree(lxt_old); 710 kfree(lxt_old);
707 *new_size = my_new_size; 711 *new_size = my_new_size;
708out: 712out:
709 pr_debug("%s: returning rc=%d\n", __func__, rc); 713 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
710 return rc; 714 return rc;
711} 715}
712 716
@@ -728,7 +732,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
728 struct ctx_info *ctxi, 732 struct ctx_info *ctxi,
729 struct dk_cxlflash_resize *resize) 733 struct dk_cxlflash_resize *resize)
730{ 734{
731 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 735 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
736 struct device *dev = &cfg->dev->dev;
732 struct llun_info *lli = sdev->hostdata; 737 struct llun_info *lli = sdev->hostdata;
733 struct glun_info *gli = lli->parent; 738 struct glun_info *gli = lli->parent;
734 struct afu *afu = cfg->afu; 739 struct afu *afu = cfg->afu;
@@ -751,13 +756,13 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
751 nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len; 756 nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
752 new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE); 757 new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
753 758
754 pr_debug("%s: ctxid=%llu rhndl=0x%llx, req_size=0x%llx," 759 dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
755 "new_size=%llx\n", __func__, ctxid, resize->rsrc_handle, 760 __func__, ctxid, resize->rsrc_handle, resize->req_size,
756 resize->req_size, new_size); 761 new_size);
757 762
758 if (unlikely(gli->mode != MODE_VIRTUAL)) { 763 if (unlikely(gli->mode != MODE_VIRTUAL)) {
759 pr_debug("%s: LUN mode does not support resize! (%d)\n", 764 dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
760 __func__, gli->mode); 765 __func__, gli->mode);
761 rc = -EINVAL; 766 rc = -EINVAL;
762 goto out; 767 goto out;
763 768
@@ -766,7 +771,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
766 if (!ctxi) { 771 if (!ctxi) {
767 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 772 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
768 if (unlikely(!ctxi)) { 773 if (unlikely(!ctxi)) {
769 pr_debug("%s: Bad context! (%llu)\n", __func__, ctxid); 774 dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
775 __func__, ctxid);
770 rc = -EINVAL; 776 rc = -EINVAL;
771 goto out; 777 goto out;
772 } 778 }
@@ -776,7 +782,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
776 782
777 rhte = get_rhte(ctxi, rhndl, lli); 783 rhte = get_rhte(ctxi, rhndl, lli);
778 if (unlikely(!rhte)) { 784 if (unlikely(!rhte)) {
779 pr_debug("%s: Bad resource handle! (%u)\n", __func__, rhndl); 785 dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
786 __func__, rhndl);
780 rc = -EINVAL; 787 rc = -EINVAL;
781 goto out; 788 goto out;
782 } 789 }
@@ -794,8 +801,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
794out: 801out:
795 if (put_ctx) 802 if (put_ctx)
796 put_context(ctxi); 803 put_context(ctxi);
797 pr_debug("%s: resized to %lld returning rc=%d\n", 804 dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
798 __func__, resize->last_lba, rc); 805 __func__, resize->last_lba, rc);
799 return rc; 806 return rc;
800} 807}
801 808
@@ -815,6 +822,7 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
815 u32 chan; 822 u32 chan;
816 u32 lind; 823 u32 lind;
817 struct afu *afu = cfg->afu; 824 struct afu *afu = cfg->afu;
825 struct device *dev = &cfg->dev->dev;
818 struct sisl_global_map __iomem *agm = &afu->afu_map->global; 826 struct sisl_global_map __iomem *agm = &afu->afu_map->global;
819 827
820 mutex_lock(&global.mutex); 828 mutex_lock(&global.mutex);
@@ -828,15 +836,15 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
828 if (lli->port_sel == BOTH_PORTS) { 836 if (lli->port_sel == BOTH_PORTS) {
829 writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]); 837 writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
830 writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]); 838 writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
831 pr_debug("%s: Virtual LUN on slot %d id0=%llx, " 839 dev_dbg(dev, "%s: Virtual LUN on slot %d id0=%llx "
832 "id1=%llx\n", __func__, lind, 840 "id1=%llx\n", __func__, lind,
833 lli->lun_id[0], lli->lun_id[1]); 841 lli->lun_id[0], lli->lun_id[1]);
834 } else { 842 } else {
835 chan = PORT2CHAN(lli->port_sel); 843 chan = PORT2CHAN(lli->port_sel);
836 writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]); 844 writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
837 pr_debug("%s: Virtual LUN on slot %d chan=%d, " 845 dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d "
838 "id=%llx\n", __func__, lind, chan, 846 "id=%llx\n", __func__, lind, chan,
839 lli->lun_id[chan]); 847 lli->lun_id[chan]);
840 } 848 }
841 } 849 }
842 850
@@ -860,6 +868,7 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
860 u32 lind; 868 u32 lind;
861 int rc = 0; 869 int rc = 0;
862 struct afu *afu = cfg->afu; 870 struct afu *afu = cfg->afu;
871 struct device *dev = &cfg->dev->dev;
863 struct sisl_global_map __iomem *agm = &afu->afu_map->global; 872 struct sisl_global_map __iomem *agm = &afu->afu_map->global;
864 873
865 mutex_lock(&global.mutex); 874 mutex_lock(&global.mutex);
@@ -882,8 +891,8 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
882 writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]); 891 writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
883 writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]); 892 writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
884 cfg->promote_lun_index++; 893 cfg->promote_lun_index++;
885 pr_debug("%s: Virtual LUN on slot %d id0=%llx, id1=%llx\n", 894 dev_dbg(dev, "%s: Virtual LUN on slot %d id0=%llx id1=%llx\n",
886 __func__, lind, lli->lun_id[0], lli->lun_id[1]); 895 __func__, lind, lli->lun_id[0], lli->lun_id[1]);
887 } else { 896 } else {
888 /* 897 /*
889 * If this LUN is visible only from one port, we will put 898 * If this LUN is visible only from one port, we will put
@@ -898,14 +907,14 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
898 lind = lli->lun_index = cfg->last_lun_index[chan]; 907 lind = lli->lun_index = cfg->last_lun_index[chan];
899 writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]); 908 writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
900 cfg->last_lun_index[chan]--; 909 cfg->last_lun_index[chan]--;
901 pr_debug("%s: Virtual LUN on slot %d chan=%d, id=%llx\n", 910 dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d id=%llx\n",
902 __func__, lind, chan, lli->lun_id[chan]); 911 __func__, lind, chan, lli->lun_id[chan]);
903 } 912 }
904 913
905 lli->in_table = true; 914 lli->in_table = true;
906out: 915out:
907 mutex_unlock(&global.mutex); 916 mutex_unlock(&global.mutex);
908 pr_debug("%s: returning rc=%d\n", __func__, rc); 917 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
909 return rc; 918 return rc;
910} 919}
911 920
@@ -923,7 +932,7 @@ out:
923 */ 932 */
924int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg) 933int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
925{ 934{
926 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 935 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
927 struct device *dev = &cfg->dev->dev; 936 struct device *dev = &cfg->dev->dev;
928 struct llun_info *lli = sdev->hostdata; 937 struct llun_info *lli = sdev->hostdata;
929 struct glun_info *gli = lli->parent; 938 struct glun_info *gli = lli->parent;
@@ -942,14 +951,14 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
942 struct ctx_info *ctxi = NULL; 951 struct ctx_info *ctxi = NULL;
943 struct sisl_rht_entry *rhte = NULL; 952 struct sisl_rht_entry *rhte = NULL;
944 953
945 pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size); 954 dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
946 955
947 /* Setup the LUNs block allocator on first call */ 956 /* Setup the LUNs block allocator on first call */
948 mutex_lock(&gli->mutex); 957 mutex_lock(&gli->mutex);
949 if (gli->mode == MODE_NONE) { 958 if (gli->mode == MODE_NONE) {
950 rc = init_vlun(lli); 959 rc = init_vlun(lli);
951 if (rc) { 960 if (rc) {
952 dev_err(dev, "%s: call to init_vlun failed rc=%d!\n", 961 dev_err(dev, "%s: init_vlun failed rc=%d\n",
953 __func__, rc); 962 __func__, rc);
954 rc = -ENOMEM; 963 rc = -ENOMEM;
955 goto err0; 964 goto err0;
@@ -958,29 +967,28 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
958 967
959 rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true); 968 rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
960 if (unlikely(rc)) { 969 if (unlikely(rc)) {
961 dev_err(dev, "%s: Failed to attach to LUN! (VIRTUAL)\n", 970 dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
962 __func__);
963 goto err0; 971 goto err0;
964 } 972 }
965 mutex_unlock(&gli->mutex); 973 mutex_unlock(&gli->mutex);
966 974
967 rc = init_luntable(cfg, lli); 975 rc = init_luntable(cfg, lli);
968 if (rc) { 976 if (rc) {
969 dev_err(dev, "%s: call to init_luntable failed rc=%d!\n", 977 dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
970 __func__, rc);
971 goto err1; 978 goto err1;
972 } 979 }
973 980
974 ctxi = get_context(cfg, rctxid, lli, 0); 981 ctxi = get_context(cfg, rctxid, lli, 0);
975 if (unlikely(!ctxi)) { 982 if (unlikely(!ctxi)) {
976 dev_err(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); 983 dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
977 rc = -EINVAL; 984 rc = -EINVAL;
978 goto err1; 985 goto err1;
979 } 986 }
980 987
981 rhte = rhte_checkout(ctxi, lli); 988 rhte = rhte_checkout(ctxi, lli);
982 if (unlikely(!rhte)) { 989 if (unlikely(!rhte)) {
983 dev_err(dev, "%s: too many opens for this context\n", __func__); 990 dev_err(dev, "%s: too many opens ctxid=%llu\n",
991 __func__, ctxid);
984 rc = -EMFILE; /* too many opens */ 992 rc = -EMFILE; /* too many opens */
985 goto err1; 993 goto err1;
986 } 994 }
@@ -996,7 +1004,7 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
996 resize.rsrc_handle = rsrc_handle; 1004 resize.rsrc_handle = rsrc_handle;
997 rc = _cxlflash_vlun_resize(sdev, ctxi, &resize); 1005 rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
998 if (rc) { 1006 if (rc) {
999 dev_err(dev, "%s: resize failed rc %d\n", __func__, rc); 1007 dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
1000 goto err2; 1008 goto err2;
1001 } 1009 }
1002 last_lba = resize.last_lba; 1010 last_lba = resize.last_lba;
@@ -1013,8 +1021,8 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
1013out: 1021out:
1014 if (likely(ctxi)) 1022 if (likely(ctxi))
1015 put_context(ctxi); 1023 put_context(ctxi);
1016 pr_debug("%s: returning handle 0x%llx rc=%d llba %lld\n", 1024 dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
1017 __func__, rsrc_handle, rc, last_lba); 1025 __func__, rsrc_handle, rc, last_lba);
1018 return rc; 1026 return rc;
1019 1027
1020err2: 1028err2:
@@ -1047,6 +1055,8 @@ static int clone_lxt(struct afu *afu,
1047 struct sisl_rht_entry *rhte, 1055 struct sisl_rht_entry *rhte,
1048 struct sisl_rht_entry *rhte_src) 1056 struct sisl_rht_entry *rhte_src)
1049{ 1057{
1058 struct cxlflash_cfg *cfg = afu->parent;
1059 struct device *dev = &cfg->dev->dev;
1050 struct sisl_lxt_entry *lxt; 1060 struct sisl_lxt_entry *lxt;
1051 u32 ngrps; 1061 u32 ngrps;
1052 u64 aun; /* chunk# allocated by block allocator */ 1062 u64 aun; /* chunk# allocated by block allocator */
@@ -1101,7 +1111,7 @@ static int clone_lxt(struct afu *afu,
1101 1111
1102 cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC); 1112 cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
1103 1113
1104 pr_debug("%s: returning\n", __func__); 1114 dev_dbg(dev, "%s: returning\n", __func__);
1105 return 0; 1115 return 0;
1106} 1116}
1107 1117
@@ -1120,7 +1130,8 @@ static int clone_lxt(struct afu *afu,
1120int cxlflash_disk_clone(struct scsi_device *sdev, 1130int cxlflash_disk_clone(struct scsi_device *sdev,
1121 struct dk_cxlflash_clone *clone) 1131 struct dk_cxlflash_clone *clone)
1122{ 1132{
1123 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1133 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1134 struct device *dev = &cfg->dev->dev;
1124 struct llun_info *lli = sdev->hostdata; 1135 struct llun_info *lli = sdev->hostdata;
1125 struct glun_info *gli = lli->parent; 1136 struct glun_info *gli = lli->parent;
1126 struct blka *blka = &gli->blka; 1137 struct blka *blka = &gli->blka;
@@ -1140,8 +1151,8 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
1140 bool found; 1151 bool found;
1141 LIST_HEAD(sidecar); 1152 LIST_HEAD(sidecar);
1142 1153
1143 pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu\n", 1154 dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
1144 __func__, ctxid_src, ctxid_dst); 1155 __func__, ctxid_src, ctxid_dst);
1145 1156
1146 /* Do not clone yourself */ 1157 /* Do not clone yourself */
1147 if (unlikely(rctxid_src == rctxid_dst)) { 1158 if (unlikely(rctxid_src == rctxid_dst)) {
@@ -1151,16 +1162,16 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
1151 1162
1152 if (unlikely(gli->mode != MODE_VIRTUAL)) { 1163 if (unlikely(gli->mode != MODE_VIRTUAL)) {
1153 rc = -EINVAL; 1164 rc = -EINVAL;
1154 pr_debug("%s: Clone not supported on physical LUNs! (%d)\n", 1165 dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
1155 __func__, gli->mode); 1166 __func__, gli->mode);
1156 goto out; 1167 goto out;
1157 } 1168 }
1158 1169
1159 ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE); 1170 ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
1160 ctxi_dst = get_context(cfg, rctxid_dst, lli, 0); 1171 ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
1161 if (unlikely(!ctxi_src || !ctxi_dst)) { 1172 if (unlikely(!ctxi_src || !ctxi_dst)) {
1162 pr_debug("%s: Bad context! (%llu,%llu)\n", __func__, 1173 dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
1163 ctxid_src, ctxid_dst); 1174 __func__, ctxid_src, ctxid_dst);
1164 rc = -EINVAL; 1175 rc = -EINVAL;
1165 goto out; 1176 goto out;
1166 } 1177 }
@@ -1185,8 +1196,8 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
1185 lun_access_dst = kzalloc(sizeof(*lun_access_dst), 1196 lun_access_dst = kzalloc(sizeof(*lun_access_dst),
1186 GFP_KERNEL); 1197 GFP_KERNEL);
1187 if (unlikely(!lun_access_dst)) { 1198 if (unlikely(!lun_access_dst)) {
1188 pr_err("%s: Unable to allocate lun_access!\n", 1199 dev_err(dev, "%s: lun_access allocation fail\n",
1189 __func__); 1200 __func__);
1190 rc = -ENOMEM; 1201 rc = -ENOMEM;
1191 goto out; 1202 goto out;
1192 } 1203 }
@@ -1197,7 +1208,7 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
1197 } 1208 }
1198 1209
1199 if (unlikely(!ctxi_src->rht_out)) { 1210 if (unlikely(!ctxi_src->rht_out)) {
1200 pr_debug("%s: Nothing to clone!\n", __func__); 1211 dev_dbg(dev, "%s: Nothing to clone\n", __func__);
1201 goto out_success; 1212 goto out_success;
1202 } 1213 }
1203 1214
@@ -1256,7 +1267,7 @@ out:
1256 put_context(ctxi_src); 1267 put_context(ctxi_src);
1257 if (ctxi_dst) 1268 if (ctxi_dst)
1258 put_context(ctxi_dst); 1269 put_context(ctxi_dst);
1259 pr_debug("%s: returning rc=%d\n", __func__, rc); 1270 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1260 return rc; 1271 return rc;
1261 1272
1262err: 1273err:
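
Two consistent threads run through the vlun.c conversion above: pr_debug()/pr_err() calls gain a struct device and become dev_dbg()/dev_err(), and hex specifiers are normalized toward lowercase (%llx, zero-padded %016llx for 64-bit LUN IDs). The device-aware variants prefix each message with the driver and device name, which matters once several adapters log concurrently. A small sketch, assuming a valid struct device pointer:

    #include <linux/device.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    static void report_lun(struct device *dev, u64 lun_id)
    {
            pr_debug("lun_id=%016llx\n", lun_id);      /* anonymous      */
            dev_dbg(dev, "lun_id=%016llx\n", lun_id);  /* "drv dev: ..." */
    }
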
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 5f75e638ec95..256dd6791fcc 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2768,16 +2768,12 @@ static int adpt_i2o_activate_hba(adpt_hba* pHba)
2768 2768
2769static int adpt_i2o_online_hba(adpt_hba* pHba) 2769static int adpt_i2o_online_hba(adpt_hba* pHba)
2770{ 2770{
2771 if (adpt_i2o_systab_send(pHba) < 0) { 2771 if (adpt_i2o_systab_send(pHba) < 0)
2772 adpt_i2o_delete_hba(pHba);
2773 return -1; 2772 return -1;
2774 }
2775 /* In READY state */ 2773 /* In READY state */
2776 2774
2777 if (adpt_i2o_enable_hba(pHba) < 0) { 2775 if (adpt_i2o_enable_hba(pHba) < 0)
2778 adpt_i2o_delete_hba(pHba);
2779 return -1; 2776 return -1;
2780 }
2781 2777
2782 /* In OPERATIONAL state */ 2778 /* In OPERATIONAL state */
2783 return 0; 2779 return 0;
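
The dpt_i2o change above drops adpt_i2o_delete_hba() from the failure legs of adpt_i2o_online_hba(), leaving teardown to the caller, presumably so the HBA cannot be deleted both here and in the caller's own error path. The shape it moves to, sketched with stand-in names (the demo_* identifiers are illustrative, not the driver's API):

    #include <linux/errno.h>

    struct demo_hba { int dummy; };

    static int demo_systab_send(struct demo_hba *h) { return 0; }
    static int demo_enable(struct demo_hba *h) { return 0; }
    static void demo_delete(struct demo_hba *h) { }

    static int demo_online(struct demo_hba *h)
    {
            if (demo_systab_send(h) < 0)
                    return -1;              /* report only, no delete */
            if (demo_enable(h) < 0)
                    return -1;
            return 0;
    }

    static int demo_probe(struct demo_hba *h)
    {
            if (demo_online(h) < 0) {
                    demo_delete(h);         /* cleanup happens exactly once */
                    return -ENODEV;
            }
            return 0;
    }
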
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index d6e53aee2295..6432a50b26d8 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -237,7 +237,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a)
237 flags |= IRQF_SHARED; 237 flags |= IRQF_SHARED;
238 238
239 esas2r_log(ESAS2R_LOG_INFO, 239 esas2r_log(ESAS2R_LOG_INFO,
240 "esas2r_claim_interrupts irq=%d (%p, %s, %x)", 240 "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
241 a->pcid->irq, a, a->name, flags); 241 a->pcid->irq, a, a->name, flags);
242 242
243 if (request_irq(a->pcid->irq, 243 if (request_irq(a->pcid->irq,
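
The one-character fix above matters because IRQ flags are unsigned long: printing one with %x consumes only an int's worth of the argument, which becomes a -Wformat error once esas2r_log() is annotated (see the esas2r_log.h hunk below). A minimal illustration:

    #include <linux/interrupt.h>
    #include <linux/printk.h>

    static void log_flags(int irq)
    {
            unsigned long flags = IRQF_SHARED;

            pr_info("irq=%d flags=%lx\n", irq, flags);  /* %lx matches */
    }
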
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 3e8483410f61..b35ed3829421 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1301,7 +1301,7 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
1301 ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL); 1301 ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
1302 if (ioctl == NULL) { 1302 if (ioctl == NULL) {
1303 esas2r_log(ESAS2R_LOG_WARN, 1303 esas2r_log(ESAS2R_LOG_WARN,
1304 "ioctl_handler kzalloc failed for %d bytes", 1304 "ioctl_handler kzalloc failed for %zu bytes",
1305 sizeof(struct atto_express_ioctl)); 1305 sizeof(struct atto_express_ioctl));
1306 return -ENOMEM; 1306 return -ENOMEM;
1307 } 1307 }
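
Same theme here: sizeof yields a size_t, whose printk specifier is %zu; %d only happens to line up where size_t is 32 bits wide. Sketch, with demo_ioctl as a stand-in for struct atto_express_ioctl:

    #include <linux/printk.h>

    struct demo_ioctl { char buf[4096]; };  /* stand-in */

    static void report_oom(void)
    {
            pr_warn("kzalloc failed for %zu bytes\n",
                    sizeof(struct demo_ioctl));
    }
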
diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h
index 7b6397bb5b94..75b9d23cd736 100644
--- a/drivers/scsi/esas2r/esas2r_log.h
+++ b/drivers/scsi/esas2r/esas2r_log.h
@@ -61,8 +61,8 @@ enum {
61#endif 61#endif
62}; 62};
63 63
64int esas2r_log(const long level, const char *format, ...); 64__printf(2, 3) int esas2r_log(const long level, const char *format, ...);
65int esas2r_log_dev(const long level, 65__printf(3, 4) int esas2r_log_dev(const long level,
66 const struct device *dev, 66 const struct device *dev,
67 const char *format, 67 const char *format,
68 ...); 68 ...);
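
__printf(n, m) expands to __attribute__((format(printf, n, m))): parameter n holds the format string and the variadic arguments start at parameter m, so gcc's -Wformat now checks every call site of these loggers. That is what surfaces the %lx and %zu fixes elsewhere in this patch. A self-contained sketch of an annotated wrapper (demo_log is illustrative; only vprintk() is assumed):

    #include <linux/compiler.h>
    #include <linux/printk.h>
    #include <stdarg.h>

    __printf(2, 3)
    static int demo_log(const long level, const char *format, ...)
    {
            va_list args;
            int ret;

            va_start(args, format);
            ret = vprintk(format, args);    /* level handling elided */
            va_end(args);
            return ret;
    }
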
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 5092c821d088..f2e9d8aa979c 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -198,7 +198,7 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
198 GFP_KERNEL); 198 GFP_KERNEL);
199 if (a->local_atto_ioctl == NULL) { 199 if (a->local_atto_ioctl == NULL) {
200 esas2r_log(ESAS2R_LOG_WARN, 200 esas2r_log(ESAS2R_LOG_WARN,
201 "write_hw kzalloc failed for %d bytes", 201 "write_hw kzalloc failed for %zu bytes",
202 sizeof(struct atto_ioctl)); 202 sizeof(struct atto_ioctl));
203 return -ENOMEM; 203 return -ENOMEM;
204 } 204 }
@@ -1186,7 +1186,7 @@ retry:
1186 } else { 1186 } else {
1187 esas2r_log(ESAS2R_LOG_CRIT, 1187 esas2r_log(ESAS2R_LOG_CRIT,
1188 "unable to allocate a request for a " 1188 "unable to allocate a request for a "
1189 "device reset (%d:%d)!", 1189 "device reset (%d:%llu)!",
1190 cmd->device->id, 1190 cmd->device->id,
1191 cmd->device->lun); 1191 cmd->device->lun);
1192 } 1192 }
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 59150cad0353..86af57f7c11a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -277,6 +277,7 @@ static struct scsi_host_template fcoe_shost_template = {
277 .name = "FCoE Driver", 277 .name = "FCoE Driver",
278 .proc_name = FCOE_NAME, 278 .proc_name = FCOE_NAME,
279 .queuecommand = fc_queuecommand, 279 .queuecommand = fc_queuecommand,
280 .eh_timed_out = fc_eh_timed_out,
280 .eh_abort_handler = fc_eh_abort, 281 .eh_abort_handler = fc_eh_abort,
281 .eh_device_reset_handler = fc_eh_device_reset, 282 .eh_device_reset_handler = fc_eh_device_reset,
282 .eh_host_reset_handler = fc_eh_host_reset, 283 .eh_host_reset_handler = fc_eh_host_reset,
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 58ce9020d69c..ba58b7953263 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -106,6 +106,7 @@ static struct scsi_host_template fnic_host_template = {
106 .module = THIS_MODULE, 106 .module = THIS_MODULE,
107 .name = DRV_NAME, 107 .name = DRV_NAME,
108 .queuecommand = fnic_queuecommand, 108 .queuecommand = fnic_queuecommand,
109 .eh_timed_out = fc_eh_timed_out,
109 .eh_abort_handler = fnic_abort_cmd, 110 .eh_abort_handler = fnic_abort_cmd,
110 .eh_device_reset_handler = fnic_device_reset, 111 .eh_device_reset_handler = fnic_device_reset,
111 .eh_host_reset_handler = fnic_host_reset, 112 .eh_host_reset_handler = fnic_host_reset,
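
Both the fcoe and fnic templates above gain .eh_timed_out = fc_eh_timed_out. Elsewhere in this merge, eh_timed_out becomes a scsi_host_template method rather than a transport-class one, so FC LLDDs now name the generic FC timeout handler themselves. Roughly what such a template looks like (demo fields only; a real template also needs queuecommand and friends):

    #include <linux/module.h>
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    static struct scsi_host_template demo_fc_sht = {
            .module         = THIS_MODULE,
            .name           = "demo-fc",
            .eh_timed_out   = fc_eh_timed_out,  /* generic FC handler */
            .this_id        = -1,
    };
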
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 6f9665d50d84..67c8dac321ad 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -26,14 +26,55 @@
26#include <linux/blkdev.h> 26#include <linux/blkdev.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include "g_NCR5380.h"
30#include "NCR5380.h"
31#include <linux/init.h> 29#include <linux/init.h>
32#include <linux/ioport.h> 30#include <linux/ioport.h>
33#include <linux/isa.h> 31#include <linux/isa.h>
34#include <linux/pnp.h> 32#include <linux/pnp.h>
35#include <linux/interrupt.h> 33#include <linux/interrupt.h>
36 34
35/* Definitions for the core NCR5380 driver. */
36
37#define NCR5380_read(reg) \
38 ioread8(hostdata->io + hostdata->offset + (reg))
39#define NCR5380_write(reg, value) \
40 iowrite8(value, hostdata->io + hostdata->offset + (reg))
41
42#define NCR5380_implementation_fields \
43 int offset; \
44 int c400_ctl_status; \
45 int c400_blk_cnt; \
46 int c400_host_buf; \
47 int io_width
48
49#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len
50#define NCR5380_dma_recv_setup generic_NCR5380_pread
51#define NCR5380_dma_send_setup generic_NCR5380_pwrite
52#define NCR5380_dma_residual NCR5380_dma_residual_none
53
54#define NCR5380_intr generic_NCR5380_intr
55#define NCR5380_queue_command generic_NCR5380_queue_command
56#define NCR5380_abort generic_NCR5380_abort
57#define NCR5380_bus_reset generic_NCR5380_bus_reset
58#define NCR5380_info generic_NCR5380_info
59
60#define NCR5380_io_delay(x) udelay(x)
61
62#include "NCR5380.h"
63
64#define DRV_MODULE_NAME "g_NCR5380"
65
66#define NCR53C400_mem_base 0x3880
67#define NCR53C400_host_buffer 0x3900
68#define NCR53C400_region_size 0x3a00
69
70#define BOARD_NCR5380 0
71#define BOARD_NCR53C400 1
72#define BOARD_NCR53C400A 2
73#define BOARD_DTC3181E 3
74#define BOARD_HP_C2502 4
75
76#define IRQ_AUTO 254
77
37#define MAX_CARDS 8 78#define MAX_CARDS 8
38 79
39/* old-style parameters for compatibility */ 80/* old-style parameters for compatibility */
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
deleted file mode 100644
index 81b22d989648..000000000000
--- a/drivers/scsi/g_NCR5380.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Generic Generic NCR5380 driver defines
3 *
4 * Copyright 1993, Drew Eckhardt
5 * Visionary Computing
6 * (Unix and Linux consulting and custom programming)
7 * drew@colorado.edu
8 * +1 (303) 440-4894
9 *
10 * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
11 * K.Lentin@cs.monash.edu.au
12 */
13
14#ifndef GENERIC_NCR5380_H
15#define GENERIC_NCR5380_H
16
17#define DRV_MODULE_NAME "g_NCR5380"
18
19#define NCR5380_read(reg) \
20 ioread8(hostdata->io + hostdata->offset + (reg))
21#define NCR5380_write(reg, value) \
22 iowrite8(value, hostdata->io + hostdata->offset + (reg))
23
24#define NCR5380_implementation_fields \
25 int offset; \
26 int c400_ctl_status; \
27 int c400_blk_cnt; \
28 int c400_host_buf; \
29 int io_width;
30
31#define NCR53C400_mem_base 0x3880
32#define NCR53C400_host_buffer 0x3900
33#define NCR53C400_region_size 0x3a00
34
35#define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len
36#define NCR5380_dma_recv_setup generic_NCR5380_pread
37#define NCR5380_dma_send_setup generic_NCR5380_pwrite
38#define NCR5380_dma_residual NCR5380_dma_residual_none
39
40#define NCR5380_intr generic_NCR5380_intr
41#define NCR5380_queue_command generic_NCR5380_queue_command
42#define NCR5380_abort generic_NCR5380_abort
43#define NCR5380_bus_reset generic_NCR5380_bus_reset
44#define NCR5380_info generic_NCR5380_info
45
46#define NCR5380_io_delay(x) udelay(x)
47
48#define BOARD_NCR5380 0
49#define BOARD_NCR53C400 1
50#define BOARD_NCR53C400A 2
51#define BOARD_DTC3181E 3
52#define BOARD_HP_C2502 4
53
54#define IRQ_AUTO 254
55
56#endif /* GENERIC_NCR5380_H */
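
Context for the header removal above: the shared NCR5380 core is parameterized by macros (register accessors, entry-point names) that each board driver must define before including NCR5380.h, so those definitions are only meaningful inside the one .c file that includes the core. Folding g_NCR5380.h into g_NCR5380.c makes that single-consumer relationship explicit. The skeleton of the convention, as the moved block itself shows:

    /* board driver, e.g. g_NCR5380.c */
    #define NCR5380_read(reg) \
            ioread8(hostdata->io + hostdata->offset + (reg))
    #define NCR5380_write(reg, value) \
            iowrite8(value, hostdata->io + hostdata->offset + (reg))

    #include "NCR5380.h"    /* core compiles against these macros */
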
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index c0cd505a9ef7..9216deaa3ff5 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -95,6 +95,7 @@ struct hisi_sas_port {
95 95
96struct hisi_sas_cq { 96struct hisi_sas_cq {
97 struct hisi_hba *hisi_hba; 97 struct hisi_hba *hisi_hba;
98 struct tasklet_struct tasklet;
98 int rd_point; 99 int rd_point;
99 int id; 100 int id;
100}; 101};
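
The new tasklet member in hisi_sas_cq lets completion-queue processing run in softirq context instead of inside the hard IRQ handler. The classic wiring for that split, sketched with demo names:

    #include <linux/interrupt.h>
    #include <linux/printk.h>

    struct demo_cq {
            struct tasklet_struct tasklet;
            int id;
    };

    static void demo_cq_work(unsigned long val)
    {
            struct demo_cq *cq = (struct demo_cq *)val;

            pr_debug("draining cq %d in softirq context\n", cq->id);
    }

    static void demo_cq_init(struct demo_cq *cq)
    {
            tasklet_init(&cq->tasklet, demo_cq_work, (unsigned long)cq);
    }

    static irqreturn_t demo_cq_irq(int irq, void *p)
    {
            struct demo_cq *cq = p;

            tasklet_schedule(&cq->tasklet);     /* defer the queue walk */
            return IRQ_HANDLED;
    }
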
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index d50e9cfefd24..53637a941b94 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -71,6 +71,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
71 struct hisi_sas_slot *slot) 71 struct hisi_sas_slot *slot)
72{ 72{
73 struct device *dev = &hisi_hba->pdev->dev; 73 struct device *dev = &hisi_hba->pdev->dev;
74 struct domain_device *device = task->dev;
75 struct hisi_sas_device *sas_dev = device->lldd_dev;
74 76
75 if (!slot->task) 77 if (!slot->task)
76 return; 78 return;
@@ -97,6 +99,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
97 slot->task = NULL; 99 slot->task = NULL;
98 slot->port = NULL; 100 slot->port = NULL;
99 hisi_sas_slot_index_free(hisi_hba, slot->idx); 101 hisi_sas_slot_index_free(hisi_hba, slot->idx);
102 if (sas_dev)
103 atomic64_dec(&sas_dev->running_req);
100 /* slot memory is fully zeroed when it is reused */ 104 /* slot memory is fully zeroed when it is reused */
101} 105}
102EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 106EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -141,11 +145,10 @@ static void hisi_sas_slot_abort(struct work_struct *work)
141 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); 145 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
142 struct scsi_cmnd *cmnd = task->uldd_task; 146 struct scsi_cmnd *cmnd = task->uldd_task;
143 struct hisi_sas_tmf_task tmf_task; 147 struct hisi_sas_tmf_task tmf_task;
144 struct domain_device *device = task->dev;
145 struct hisi_sas_device *sas_dev = device->lldd_dev;
146 struct scsi_lun lun; 148 struct scsi_lun lun;
147 struct device *dev = &hisi_hba->pdev->dev; 149 struct device *dev = &hisi_hba->pdev->dev;
148 int tag = abort_slot->idx; 150 int tag = abort_slot->idx;
151 unsigned long flags;
149 152
150 if (!(task->task_proto & SAS_PROTOCOL_SSP)) { 153 if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
151 dev_err(dev, "cannot abort slot for non-ssp task\n"); 154 dev_err(dev, "cannot abort slot for non-ssp task\n");
@@ -159,11 +162,11 @@ static void hisi_sas_slot_abort(struct work_struct *work)
159 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task); 162 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
160out: 163out:
161 /* Do cleanup for this task */ 164 /* Do cleanup for this task */
165 spin_lock_irqsave(&hisi_hba->lock, flags);
162 hisi_sas_slot_task_free(hisi_hba, task, abort_slot); 166 hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
167 spin_unlock_irqrestore(&hisi_hba->lock, flags);
163 if (task->task_done) 168 if (task->task_done)
164 task->task_done(task); 169 task->task_done(task);
165 if (sas_dev)
166 atomic64_dec(&sas_dev->running_req);
167} 170}
168 171
169static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, 172static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
@@ -1118,7 +1121,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1118 } 1121 }
1119 1122
1120exit: 1123exit:
1121 dev_info(dev, "internal task abort: task to dev %016llx task=%p " 1124 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1122 "resp: 0x%x sts 0x%x\n", 1125 "resp: 0x%x sts 0x%x\n",
1123 SAS_ADDR(device->sas_addr), 1126 SAS_ADDR(device->sas_addr),
1124 task, 1127 task,
@@ -1450,7 +1453,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
1450 1453
1451 refclk = devm_clk_get(&pdev->dev, NULL); 1454 refclk = devm_clk_get(&pdev->dev, NULL);
1452 if (IS_ERR(refclk)) 1455 if (IS_ERR(refclk))
1453 dev_info(dev, "no ref clk property\n"); 1456 dev_dbg(dev, "no ref clk property\n");
1454 else 1457 else
1455 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 1458 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
1456 1459
@@ -1549,10 +1552,6 @@ int hisi_sas_probe(struct platform_device *pdev,
1549 1552
1550 hisi_sas_init_add(hisi_hba); 1553 hisi_sas_init_add(hisi_hba);
1551 1554
1552 rc = hisi_hba->hw->hw_init(hisi_hba);
1553 if (rc)
1554 goto err_out_ha;
1555
1556 rc = scsi_add_host(shost, &pdev->dev); 1555 rc = scsi_add_host(shost, &pdev->dev);
1557 if (rc) 1556 if (rc)
1558 goto err_out_ha; 1557 goto err_out_ha;
@@ -1561,6 +1560,10 @@ int hisi_sas_probe(struct platform_device *pdev,
1561 if (rc) 1560 if (rc)
1562 goto err_out_register_ha; 1561 goto err_out_register_ha;
1563 1562
1563 rc = hisi_hba->hw->hw_init(hisi_hba);
1564 if (rc)
1565 goto err_out_register_ha;
1566
1564 scsi_scan_host(shost); 1567 scsi_scan_host(shost);
1565 1568
1566 return 0; 1569 return 0;
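
Two structural points in the hisi_sas_main.c hunks above: probe now defers hw_init until after scsi_add_host()/sas_register_ha(), and hisi_sas_slot_task_free() takes over the running_req decrement while being called under hisi_hba->lock. Process-context callers take that lock with irqsave, since completion paths free slots from interrupt context too. The locking shape, reduced to a sketch:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_slot_free(void)
    {
            /* free the slot and decrement running_req; demo_lock held */
    }

    static void demo_abort_path(void)       /* process context */
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            demo_slot_free();
            spin_unlock_irqrestore(&demo_lock, flags);
    }
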
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 8a1be0ba8a22..854fbeaade3e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1596,6 +1596,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
1596 hisi_hba->complete_hdr[queue]; 1596 hisi_hba->complete_hdr[queue];
1597 u32 irq_value, rd_point = cq->rd_point, wr_point; 1597 u32 irq_value, rd_point = cq->rd_point, wr_point;
1598 1598
1599 spin_lock(&hisi_hba->lock);
1599 irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC); 1600 irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
1600 1601
1601 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); 1602 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
@@ -1628,6 +1629,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
1628 /* update rd_point */ 1629 /* update rd_point */
1629 cq->rd_point = rd_point; 1630 cq->rd_point = rd_point;
1630 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); 1631 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
1632 spin_unlock(&hisi_hba->lock);
1631 1633
1632 return IRQ_HANDLED; 1634 return IRQ_HANDLED;
1633} 1635}
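
The v1 hunk above takes hisi_hba->lock across the completion-queue walk. In a hard IRQ handler the plain spin_lock()/spin_unlock() pair suffices, since local interrupts are already disabled; it is the process-context users of the same lock (as in the sketch after the main.c hunks) that need the irqsave variants. Reduced:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static irqreturn_t demo_cq_interrupt(int irq, void *p)
    {
            spin_lock(&demo_lock);      /* hardirq: no irqsave needed */
            /* ack the source, advance rd_point toward the write pointer */
            spin_unlock(&demo_lock);
            return IRQ_HANDLED;
    }
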
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b934aec1eebb..1b214450dcb5 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -207,6 +207,8 @@
207#define TXID_AUTO (PORT_BASE + 0xb8) 207#define TXID_AUTO (PORT_BASE + 0xb8)
208#define TXID_AUTO_CT3_OFF 1 208#define TXID_AUTO_CT3_OFF 1
209#define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF) 209#define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF)
210#define TX_HARDRST_OFF 2
211#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
210#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) 212#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
211#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) 213#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8)
212#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) 214#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc)
@@ -215,6 +217,7 @@
215#define RX_IDAF_DWORD5 (PORT_BASE + 0xd8) 217#define RX_IDAF_DWORD5 (PORT_BASE + 0xd8)
216#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) 218#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc)
217#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) 219#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
220#define CON_CONTROL (PORT_BASE + 0x118)
218#define DONE_RECEIVED_TIME (PORT_BASE + 0x11c) 221#define DONE_RECEIVED_TIME (PORT_BASE + 0x11c)
219#define CHL_INT0 (PORT_BASE + 0x1b4) 222#define CHL_INT0 (PORT_BASE + 0x1b4)
220#define CHL_INT0_HOTPLUG_TOUT_OFF 0 223#define CHL_INT0_HOTPLUG_TOUT_OFF 0
@@ -333,6 +336,11 @@
333#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF) 336#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
334#define ITCT_HDR_VLN_OFF 9 337#define ITCT_HDR_VLN_OFF 9
335#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF) 338#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
339#define ITCT_HDR_SMP_TIMEOUT_OFF 16
340#define ITCT_HDR_SMP_TIMEOUT_8US 1
341#define ITCT_HDR_SMP_TIMEOUT (ITCT_HDR_SMP_TIMEOUT_8US * \
342 250) /* 2ms */
343#define ITCT_HDR_AWT_CONTINUE_OFF 25
336#define ITCT_HDR_PORT_ID_OFF 28 344#define ITCT_HDR_PORT_ID_OFF 28
337#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF) 345#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
338/* qw2 */ 346/* qw2 */
@@ -526,6 +534,8 @@ enum {
526#define SATA_PROTOCOL_FPDMA 0x8 534#define SATA_PROTOCOL_FPDMA 0x8
527#define SATA_PROTOCOL_ATAPI 0x10 535#define SATA_PROTOCOL_ATAPI 0x10
528 536
537static void hisi_sas_link_timeout_disable_link(unsigned long data);
538
529static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) 539static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
530{ 540{
531 void __iomem *regs = hisi_hba->regs + off; 541 void __iomem *regs = hisi_hba->regs + off;
@@ -693,6 +703,8 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
693 qw0 |= ((1 << ITCT_HDR_VALID_OFF) | 703 qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
694 (device->linkrate << ITCT_HDR_MCR_OFF) | 704 (device->linkrate << ITCT_HDR_MCR_OFF) |
695 (1 << ITCT_HDR_VLN_OFF) | 705 (1 << ITCT_HDR_VLN_OFF) |
706 (ITCT_HDR_SMP_TIMEOUT << ITCT_HDR_SMP_TIMEOUT_OFF) |
707 (1 << ITCT_HDR_AWT_CONTINUE_OFF) |
696 (port->id << ITCT_HDR_PORT_ID_OFF)); 708 (port->id << ITCT_HDR_PORT_ID_OFF));
697 itct->qw0 = cpu_to_le64(qw0); 709 itct->qw0 = cpu_to_le64(qw0);
698 710
@@ -702,7 +714,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
702 714
703 /* qw2 */ 715 /* qw2 */
704 if (!dev_is_sata(device)) 716 if (!dev_is_sata(device))
705 itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) | 717 itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
706 (0x1ULL << ITCT_HDR_BITLT_OFF) | 718 (0x1ULL << ITCT_HDR_BITLT_OFF) |
707 (0x32ULL << ITCT_HDR_MCTLT_OFF) | 719 (0x32ULL << ITCT_HDR_MCTLT_OFF) |
708 (0x1ULL << ITCT_HDR_RTOLT_OFF)); 720 (0x1ULL << ITCT_HDR_RTOLT_OFF));
@@ -711,7 +723,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
711static void free_device_v2_hw(struct hisi_hba *hisi_hba, 723static void free_device_v2_hw(struct hisi_hba *hisi_hba,
712 struct hisi_sas_device *sas_dev) 724 struct hisi_sas_device *sas_dev)
713{ 725{
714 u64 qw0, dev_id = sas_dev->device_id; 726 u64 dev_id = sas_dev->device_id;
715 struct device *dev = &hisi_hba->pdev->dev; 727 struct device *dev = &hisi_hba->pdev->dev;
716 struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; 728 struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
717 u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 729 u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
@@ -735,8 +747,7 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
735 dev_dbg(dev, "got clear ITCT done interrupt\n"); 747 dev_dbg(dev, "got clear ITCT done interrupt\n");
736 748
737 /* invalid the itct state*/ 749 /* invalid the itct state*/
738 qw0 = cpu_to_le64(itct->qw0); 750 memset(itct, 0, sizeof(struct hisi_sas_itct));
739 qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
740 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 751 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
741 ENT_INT_SRC3_ITC_INT_MSK); 752 ENT_INT_SRC3_ITC_INT_MSK);
742 753
@@ -978,6 +989,50 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
978 upper_32_bits(hisi_hba->initial_fis_dma)); 989 upper_32_bits(hisi_hba->initial_fis_dma));
979} 990}
980 991
992static void hisi_sas_link_timeout_enable_link(unsigned long data)
993{
994 struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
995 int i, reg_val;
996
997 for (i = 0; i < hisi_hba->n_phy; i++) {
998 reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL);
999 if (!(reg_val & BIT(0))) {
1000 hisi_sas_phy_write32(hisi_hba, i,
1001 CON_CONTROL, 0x7);
1002 break;
1003 }
1004 }
1005
1006 hisi_hba->timer.function = hisi_sas_link_timeout_disable_link;
1007 mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
1008}
1009
1010static void hisi_sas_link_timeout_disable_link(unsigned long data)
1011{
1012 struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
1013 int i, reg_val;
1014
1015 reg_val = hisi_sas_read32(hisi_hba, PHY_STATE);
1016 for (i = 0; i < hisi_hba->n_phy && reg_val; i++) {
1017 if (reg_val & BIT(i)) {
1018 hisi_sas_phy_write32(hisi_hba, i,
1019 CON_CONTROL, 0x6);
1020 break;
1021 }
1022 }
1023
1024 hisi_hba->timer.function = hisi_sas_link_timeout_enable_link;
1025 mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
1026}
1027
1028static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
1029{
1030 hisi_hba->timer.data = (unsigned long)hisi_hba;
1031 hisi_hba->timer.function = hisi_sas_link_timeout_disable_link;
1032 hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
1033 add_timer(&hisi_hba->timer);
1034}
1035
 static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
 {
 	struct device *dev = &hisi_hba->pdev->dev;
@@ -1025,14 +1080,21 @@ static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 
 static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	u32 txid_auto;
+
 	stop_phy_v2_hw(hisi_hba, phy_no);
+	if (phy->identify.device_type == SAS_END_DEVICE) {
+		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
+		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+				     txid_auto | TX_HARDRST_MSK);
+	}
 	msleep(100);
 	start_phy_v2_hw(hisi_hba, phy_no);
 }
 
-static void start_phys_v2_hw(unsigned long data)
+static void start_phys_v2_hw(struct hisi_hba *hisi_hba)
 {
-	struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
 	int i;
 
 	for (i = 0; i < hisi_hba->n_phy; i++)
@@ -1041,10 +1103,7 @@ static void start_phys_v2_hw(unsigned long data)
 
 static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
 {
-	struct timer_list *timer = &hisi_hba->timer;
-
-	setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
-	mod_timer(timer, jiffies + HZ);
+	start_phys_v2_hw(hisi_hba);
 }
 
 static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1771,8 +1830,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
 	}
 
 out:
-	if (sas_dev)
-		atomic64_dec(&sas_dev->running_req);
 
 	hisi_sas_slot_task_free(hisi_hba, task, slot);
 	sts = ts->stat;
@@ -2020,9 +2077,12 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 		if (phy->identify.device_type == SAS_END_DEVICE)
 			phy->identify.target_port_protocols =
 				SAS_PROTOCOL_SSP;
-		else if (phy->identify.device_type != SAS_PHY_UNUSED)
+		else if (phy->identify.device_type != SAS_PHY_UNUSED) {
 			phy->identify.target_port_protocols =
 				SAS_PROTOCOL_SMP;
+			if (!timer_pending(&hisi_hba->timer))
+				set_link_timer_quirk(hisi_hba);
+		}
 	queue_work(hisi_hba->wq, &phy->phyup_ws);
 
 end:
@@ -2033,10 +2093,23 @@ end:
 	return res;
 }
 
+static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba)
+{
+	u32 port_state;
+
+	port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+	if (port_state & 0x1ff)
+		return true;
+
+	return false;
+}
+
 static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 {
 	int res = 0;
 	u32 phy_state, sl_ctrl, txid_auto;
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	struct hisi_sas_port *port = phy->port;
 
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
 
@@ -2046,6 +2119,10 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
 	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
 			     sl_ctrl & ~SL_CONTROL_CTA_MSK);
+	if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id))
+		if (!check_any_wideports_v2_hw(hisi_hba) &&
+		    timer_pending(&hisi_hba->timer))
+			del_timer(&hisi_hba->timer);
 
 	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
 	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
@@ -2481,21 +2558,19 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
+static void cq_tasklet_v2_hw(unsigned long val)
 {
-	struct hisi_sas_cq *cq = p;
+	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
 	struct hisi_hba *hisi_hba = cq->hisi_hba;
 	struct hisi_sas_slot *slot;
 	struct hisi_sas_itct *itct;
 	struct hisi_sas_complete_v2_hdr *complete_queue;
-	u32 irq_value, rd_point = cq->rd_point, wr_point, dev_id;
+	u32 rd_point = cq->rd_point, wr_point, dev_id;
 	int queue = cq->id;
 
 	complete_queue = hisi_hba->complete_hdr[queue];
-	irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
-
-	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
 
+	spin_lock(&hisi_hba->lock);
 	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
 				   (0x14 * queue));
 
@@ -2545,6 +2620,19 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
 	/* update rd_point */
 	cq->rd_point = rd_point;
 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+	spin_unlock(&hisi_hba->lock);
+}
+
+static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
+{
+	struct hisi_sas_cq *cq = p;
+	struct hisi_hba *hisi_hba = cq->hisi_hba;
+	int queue = cq->id;
+
+	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
+
+	tasklet_schedule(&cq->tasklet);
+
 	return IRQ_HANDLED;
 }
 
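The conversion above moves completion-queue processing out of hard-IRQ context: the handler now only acknowledges the interrupt source and schedules a tasklet, which drains the queue under the HBA lock in softirq context. A generic sketch of the same top-half/bottom-half split (illustrative names, not driver code):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_cq {
	spinlock_t lock;
	struct tasklet_struct tasklet;
};

/* bottom half: runs in softirq context, may loop and take spinlocks */
static void demo_cq_tasklet(unsigned long val)
{
	struct demo_cq *cq = (struct demo_cq *)val;

	spin_lock(&cq->lock);
	/* drain completion entries here */
	spin_unlock(&cq->lock);
}

/* top half: keep it short -- ack the hardware and defer the real work */
static irqreturn_t demo_cq_irq(int irq, void *p)
{
	struct demo_cq *cq = p;

	/* ack/clear the interrupt source in hardware here */
	tasklet_schedule(&cq->tasklet);
	return IRQ_HANDLED;
}

static void demo_cq_setup(struct demo_cq *cq)
{
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->tasklet, demo_cq_tasklet, (unsigned long)cq);
}

A tasklet never runs concurrently with itself, so the per-queue read pointer stays consistent even if the interrupt fires again while a previous batch is still being drained.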
@@ -2726,6 +2814,8 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 
 	for (i = 0; i < hisi_hba->queue_count; i++) {
 		int idx = i + 96; /* First cq interrupt is irq96 */
+		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+		struct tasklet_struct *t = &cq->tasklet;
 
 		irq = irq_map[idx];
 		if (!irq) {
@@ -2742,6 +2832,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 				irq, rc);
 			return -ENOENT;
 		}
+		tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
 	}
 
 	return 0;
@@ -2807,6 +2898,12 @@ static int hisi_sas_v2_probe(struct platform_device *pdev)
 
 static int hisi_sas_v2_remove(struct platform_device *pdev)
 {
+	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
+	struct hisi_hba *hisi_hba = sha->lldd_ha;
+
+	if (timer_pending(&hisi_hba->timer))
+		del_timer(&hisi_hba->timer);
+
 	return hisi_sas_remove(pdev);
 }
 
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c611412a8de9..524a0c755ed7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -9263,13 +9263,9 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 		access = SA5_ioaccel_mode1_access;
 		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
 		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
-	} else {
-		if (trans_support & CFGTBL_Trans_io_accel2) {
+	} else
+		if (trans_support & CFGTBL_Trans_io_accel2)
 			access = SA5_ioaccel_mode2_access;
-			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
-			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
-		}
-	}
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 	if (hpsa_wait_for_mode_change_ack(h)) {
 		dev_err(&h->pdev->dev,
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 64e98295b707..bf6cdc106654 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -578,38 +578,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 }
 
 static struct access_method SA5_access = {
-	SA5_submit_command,
-	SA5_intr_mask,
-	SA5_intr_pending,
-	SA5_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5_intr_mask,
+	.intr_pending = SA5_intr_pending,
+	.command_completed = SA5_completed,
 };
 
 static struct access_method SA5_ioaccel_mode1_access = {
-	SA5_submit_command,
-	SA5_performant_intr_mask,
-	SA5_ioaccel_mode1_intr_pending,
-	SA5_ioaccel_mode1_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5_performant_intr_mask,
+	.intr_pending = SA5_ioaccel_mode1_intr_pending,
+	.command_completed = SA5_ioaccel_mode1_completed,
 };
 
 static struct access_method SA5_ioaccel_mode2_access = {
-	SA5_submit_command_ioaccel2,
-	SA5_performant_intr_mask,
-	SA5_performant_intr_pending,
-	SA5_performant_completed,
+	.submit_command = SA5_submit_command_ioaccel2,
+	.set_intr_mask = SA5_performant_intr_mask,
+	.intr_pending = SA5_performant_intr_pending,
+	.command_completed = SA5_performant_completed,
 };
 
 static struct access_method SA5_performant_access = {
-	SA5_submit_command,
-	SA5_performant_intr_mask,
-	SA5_performant_intr_pending,
-	SA5_performant_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5_performant_intr_mask,
+	.intr_pending = SA5_performant_intr_pending,
+	.command_completed = SA5_performant_completed,
 };
 
 static struct access_method SA5_performant_access_no_read = {
-	SA5_submit_command_no_read,
-	SA5_performant_intr_mask,
-	SA5_performant_intr_pending,
-	SA5_performant_completed,
+	.submit_command = SA5_submit_command_no_read,
+	.set_intr_mask = SA5_performant_intr_mask,
+	.intr_pending = SA5_performant_intr_pending,
+	.command_completed = SA5_performant_completed,
 };
 
 struct board_type {
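The hpsa change is purely mechanical but worth noting: positional initialization of a function-pointer ops structure silently breaks if a member is ever added or reordered, while designated initializers bind each callback to its field by name. A standalone C illustration of the difference (a hypothetical ops type, not the hpsa one):

#include <stdio.h>

struct demo_ops {
	void (*submit)(int cmd);
	int  (*pending)(void);
};

static void demo_submit(int cmd) { printf("submit %d\n", cmd); }
static int  demo_pending(void)   { return 0; }

/* positional: correctness depends on member order staying fixed */
static struct demo_ops ops_positional = { demo_submit, demo_pending };

/* designated: each callback is tied to its field by name */
static struct demo_ops ops_designated = {
	.submit  = demo_submit,
	.pending = demo_pending,
};

int main(void)
{
	ops_positional.submit(1);
	ops_designated.submit(2);
	return ops_designated.pending();
}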
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 78b72c28a55d..2c92dabb55f6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3090,6 +3090,7 @@ static struct scsi_host_template driver_template = {
 	.name = "IBM POWER Virtual FC Adapter",
 	.proc_name = IBMVFC_NAME,
 	.queuecommand = ibmvfc_queuecommand,
+	.eh_timed_out = fc_eh_timed_out,
 	.eh_abort_handler = ibmvfc_eh_abort_handler,
 	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
 	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 50cd01165e35..1deb0a9f14a6 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2072,6 +2072,7 @@ static struct scsi_host_template driver_template = {
 	.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
 	.proc_name = "ibmvscsi",
 	.queuecommand = ibmvscsi_queuecommand,
+	.eh_timed_out = srp_timed_out,
 	.eh_abort_handler = ibmvscsi_eh_abort_handler,
 	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
 	.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index ace4f1f41b8e..4228aba1f654 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -967,6 +967,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
 	.sg_tablesize		= 4096,
 	.max_sectors		= 0xFFFF,
 	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
+	.eh_timed_out		= iscsi_eh_cmd_timed_out,
 	.eh_abort_handler	= iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f9b6fba689ff..834d1212b6d5 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1930,7 +1930,7 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
 	return 0;
 }
 
-static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 {
 	enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
 	struct iscsi_task *task = NULL, *running_task;
@@ -2063,6 +2063,7 @@ done:
 		    "timer reset" : "nh");
 	return rc;
 }
+EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
 
 static void iscsi_check_transport_timeouts(unsigned long data)
 {
@@ -2585,8 +2586,6 @@ int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
 	if (!shost->cmd_per_lun)
 		shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN;
 
-	if (!shost->transportt->eh_timed_out)
-		shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
 	return scsi_add_host(shost, pdev);
 }
 EXPORT_SYMBOL_GPL(iscsi_host_add);
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 362da44f2948..15ef8e2e685c 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -560,7 +560,6 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
 	i = to_sas_internal(stt);
 	i->dft = dft;
 	stt->create_work_queue = 1;
-	stt->eh_timed_out = sas_scsi_timed_out;
 	stt->eh_strategy_handler = sas_scsi_recover_host;
 
 	return stt;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 9cf0bc260b0e..b306b7843d99 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -64,8 +64,6 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
 int sas_register_ports(struct sas_ha_struct *sas_ha);
 void sas_unregister_ports(struct sas_ha_struct *sas_ha);
 
-enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
-
 int sas_init_events(struct sas_ha_struct *sas_ha);
 void sas_disable_revalidation(struct sas_ha_struct *ha);
 void sas_enable_revalidation(struct sas_ha_struct *ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 519dac4e341e..9bd55bce83af 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -803,13 +803,6 @@ out:
 		    shost->host_failed, tries);
 }
 
-enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
-{
-	scmd_dbg(cmd, "command %p timed out\n", cmd);
-
-	return BLK_EH_NOT_HANDLED;
-}
-
 int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
 	struct domain_device *dev = sdev_to_domain_dev(sdev);
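Several hunks in this series follow one pattern: the per-command timeout hook moves from the transport class (transportt->eh_timed_out) into each driver's scsi_host_template, so a driver now names its timeout handler explicitly next to its other error handlers. A schematic host template showing where the hook lands (fields trimmed, handler names and return choices illustrative):

#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int demo_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	return 0;	/* pretend the command was queued */
}

static int demo_eh_abort(struct scsi_cmnd *sc)
{
	return SUCCESS;
}

static enum blk_eh_timer_return demo_eh_timed_out(struct scsi_cmnd *sc)
{
	/* give the command more time rather than escalating */
	return BLK_EH_RESET_TIMER;
}

static struct scsi_host_template demo_template = {
	.module			= THIS_MODULE,
	.name			= "demo",
	.queuecommand		= demo_queuecommand,
	.eh_timed_out		= demo_eh_timed_out,	/* host-template hook */
	.eh_abort_handler	= demo_eh_abort,
	.this_id		= -1,
};

Keeping the hook in the host template lets two hosts on the same transport use different timeout policies, which the single transport-wide pointer could not express.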
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8a20b4e86224..6593b073c524 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -727,7 +727,6 @@ struct lpfc_hba {
 	uint32_t cfg_fcp_io_channel;
 	uint32_t cfg_total_seg_cnt;
 	uint32_t cfg_sg_seg_cnt;
-	uint32_t cfg_prot_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
 	uint64_t cfg_soft_wwnn;
 	uint64_t cfg_soft_wwpn;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c84775562c65..50cf402dea29 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2073,6 +2073,13 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	phba->soft_wwn_enable = 1;
+
+	dev_printk(KERN_WARNING, &phba->pcidev->dev,
+		   "lpfc%d: soft_wwpn assignment has been enabled.\n",
+		   phba->brd_no);
+	dev_printk(KERN_WARNING, &phba->pcidev->dev,
+		   " The soft_wwpn feature is not supported by Broadcom.");
+
 	return count;
 }
 static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
@@ -2143,7 +2150,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
 	phba->soft_wwn_enable = 0;
 
 	rc = lpfc_wwn_set(buf, cnt, wwpn);
-	if (!rc) {
+	if (rc) {
 		/* not able to set wwpn, unlock it */
 		phba->soft_wwn_enable = 1;
 		return rc;
@@ -2224,7 +2231,7 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	rc = lpfc_wwn_set(buf, cnt, wwnn);
-	if (!rc) {
+	if (rc) {
 		/* Allow wwnn to be set many times, as long as the enable
 		 * is set. However, once the wwpn is set, everything locks.
 		 */
@@ -2435,7 +2442,8 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
 	else
 		phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
 	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
-	phba->cfg_oas_priority = phba->cfg_XLanePriority;
+	if (phba->cfg_oas_priority == 0)
+		phba->cfg_oas_priority = phba->cfg_XLanePriority;
 	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
 	return count;
 }
@@ -2561,7 +2569,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
 			rc = -ENOMEM;
 	} else {
 		lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
-				     (struct lpfc_name *)tgt_wwpn, lun);
+				     (struct lpfc_name *)tgt_wwpn, lun, pri);
 	}
 	return rc;
 
@@ -2585,7 +2593,8 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
  */
 static uint64_t
 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
-		      uint8_t tgt_wwpn[], uint32_t *lun_status)
+		      uint8_t tgt_wwpn[], uint32_t *lun_status,
+		      uint32_t *lun_pri)
 {
 	uint64_t found_lun;
 
@@ -2598,7 +2607,7 @@ lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
 			&phba->sli4_hba.oas_next_lun,
 			(struct lpfc_name *)vpt_wwpn,
 			(struct lpfc_name *)tgt_wwpn,
-			&found_lun, lun_status))
+			&found_lun, lun_status, lun_pri))
 		return found_lun;
 	else
 		return NOT_OAS_ENABLED_LUN;
@@ -2670,7 +2679,8 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
 
 	oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
 					phba->cfg_oas_tgt_wwpn,
-					&phba->cfg_oas_lun_status);
+					&phba->cfg_oas_lun_status,
+					&phba->cfg_oas_priority);
 	if (oas_lun != NOT_OAS_ENABLED_LUN)
 		phba->cfg_oas_flags |= OAS_LUN_VALID;
 
@@ -2701,6 +2711,7 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 	uint64_t scsi_lun;
+	uint32_t pri;
 	ssize_t rc;
 
 	if (!phba->cfg_fof)
@@ -2718,17 +2729,20 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
 	if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
 		return -EINVAL;
 
+	pri = phba->cfg_oas_priority;
+	if (pri == 0)
+		pri = phba->cfg_XLanePriority;
+
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 			"3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
 			"priority 0x%x with oas state %d\n",
 			wwn_to_u64(phba->cfg_oas_vpt_wwpn),
 			wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
-			phba->cfg_oas_priority, phba->cfg_oas_lun_state);
+			pri, phba->cfg_oas_lun_state);
 
 	rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
 				       phba->cfg_oas_tgt_wwpn, scsi_lun,
-				       phba->cfg_oas_lun_state,
-				       phba->cfg_oas_priority);
+				       phba->cfg_oas_lun_state, pri);
 	if (rc)
 		return rc;
 
@@ -4670,14 +4684,6 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
 	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
 /*
- * This parameter will be depricated, the driver cannot limit the
- * protection data s/g list.
- */
-LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
-	    LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
-	    "Max Protection Scatter Gather Segment Count");
-
-/*
  * lpfc_enable_mds_diags: Enable MDS Diagnostics
  * 0 = MDS Diagnostics disabled (default)
  * 1 = MDS Diagnostics enabled
@@ -4766,7 +4772,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_sg_seg_cnt,
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
-	&dev_attr_lpfc_prot_sg_seg_cnt,
 	&dev_attr_lpfc_aer_support,
 	&dev_attr_lpfc_aer_state_cleanup,
 	&dev_attr_lpfc_sriov_nr_virtfn,
@@ -5061,6 +5066,19 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
  */
 
 /**
+ * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+
+	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
+				      sizeof fc_host_symbolic_name(shost));
+}
+
+/**
  * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
  * @shost: kernel scsi host pointer.
  **/
@@ -5597,6 +5615,8 @@ struct fc_function_template lpfc_transport_functions = {
 	.show_host_supported_fc4s = 1,
 	.show_host_supported_speeds = 1,
 	.show_host_maxframe_size = 1,
+
+	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
 	.show_host_symbolic_name = 1,
 
 	/* dynamic attributes the driver supports */
@@ -5664,6 +5684,8 @@ struct fc_function_template lpfc_vport_transport_functions = {
 	.show_host_supported_fc4s = 1,
 	.show_host_supported_speeds = 1,
 	.show_host_maxframe_size = 1,
+
+	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
 	.show_host_symbolic_name = 1,
 
 	/* dynamic attributes the driver supports */
@@ -5768,7 +5790,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
-	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
 	lpfc_aer_support_init(phba, lpfc_aer_support);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 15d2bfdf582d..309643a2c55c 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -480,7 +480,7 @@ void lpfc_sli4_offline_eratt(struct lpfc_hba *);
 struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
 						struct lpfc_name *,
 						struct lpfc_name *,
-						uint64_t, bool);
+						uint64_t, uint32_t, bool);
 void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
 struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
 						struct list_head *list,
@@ -489,9 +489,10 @@ struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
 bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
 			 struct lpfc_name *, uint64_t, uint8_t);
 bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
-			  struct lpfc_name *, uint64_t);
+			  struct lpfc_name *, uint64_t, uint8_t);
 bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
 			    struct lpfc_name *, uint64_t *, struct lpfc_name *,
-			    struct lpfc_name *, uint64_t *, uint32_t *);
+			    struct lpfc_name *, uint64_t *,
+			    uint32_t *, uint32_t *);
 int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
 void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 63bef4566548..3a1f1a2a2b55 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1999,6 +1999,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 	if (sp->cmn.fcphHigh < FC_PH3)
 		sp->cmn.fcphHigh = FC_PH3;
 
+	sp->cmn.valid_vendor_ver_level = 0;
+	memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
+
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 		"Issue PLOGI:     did:x%x",
 		did, 0, 0);
@@ -3990,6 +3993,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 	} else {
 		memcpy(pcmd, &vport->fc_sparam,
 		       sizeof(struct serv_parm));
+
+		sp->cmn.valid_vendor_ver_level = 0;
+		memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
 	}
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
@@ -8851,8 +8857,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 {
 	struct ls_rjt stat;
 
-	if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
-		BUG();
+	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
 
 	switch (rspiocb->iocb.ulpStatus) {
 	case IOSTAT_NPORT_RJT:
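The last hunk is a common idiom swap: an open-coded `if (cond) BUG();` becomes `BUG_ON(cond)`, which reads as an assertion and keeps the condition on the same line as the crash point. In sketch form (hypothetical flags, not the lpfc fields):

#include <linux/bug.h>

static void demo_check(unsigned int flags, unsigned int required)
{
	/* open-coded form:
	 *	if ((flags & required) != required)
	 *		BUG();
	 * assertion form, as in the hunk above:
	 */
	BUG_ON((flags & required) != required);
}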
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 822654322e67..3b970d370600 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -360,6 +360,12 @@ struct csp {
360 * Word 1 Bit 30 in PLOGI request is random offset 360 * Word 1 Bit 30 in PLOGI request is random offset
361 */ 361 */
362#define virtual_fabric_support randomOffset /* Word 1, bit 30 */ 362#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
363/*
364 * Word 1 Bit 29 in common service parameter is overloaded.
365 * Word 1 Bit 29 in FLOGI response is multiple NPort assignment
366 * Word 1 Bit 29 in FLOGI/PLOGI request is Valid Vendor Version Level
367 */
368#define valid_vendor_ver_level response_multiple_NPort /* Word 1, bit 29 */
363#ifdef __BIG_ENDIAN_BITFIELD 369#ifdef __BIG_ENDIAN_BITFIELD
364 uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ 370 uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
365 uint16_t randomOffset:1; /* FC Word 1, bit 30 */ 371 uint16_t randomOffset:1; /* FC Word 1, bit 30 */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index ad350d969bdc..1180a22beb43 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5452,7 +5452,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 		device_data = lpfc_create_device_data(phba,
 					&vport->fc_portname,
 					&target_wwpn,
-					sdev->lun, true);
+					sdev->lun,
+					phba->cfg_XLanePriority,
+					true);
 		if (!device_data)
 			return -ENOMEM;
 		spin_lock_irqsave(&phba->devicelock, flags);
@@ -5587,7 +5589,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
 struct lpfc_device_data*
 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
 			struct lpfc_name *target_wwpn, uint64_t lun,
-			bool atomic_create)
+			uint32_t pri, bool atomic_create)
 {
 
 	struct lpfc_device_data *lun_info;
@@ -5614,7 +5616,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
 	       sizeof(struct lpfc_name));
 	lun_info->device_id.lun = lun;
 	lun_info->oas_enabled = false;
-	lun_info->priority = phba->cfg_XLanePriority;
+	lun_info->priority = pri;
 	lun_info->available = false;
 	return lun_info;
 }
@@ -5716,7 +5718,8 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
 		       struct lpfc_name *found_vport_wwpn,
 		       struct lpfc_name *found_target_wwpn,
 		       uint64_t *found_lun,
-		       uint32_t *found_lun_status)
+		       uint32_t *found_lun_status,
+		       uint32_t *found_lun_pri)
 {
 
 	unsigned long flags;
@@ -5763,6 +5766,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
 					OAS_LUN_STATUS_EXISTS;
 			else
 				*found_lun_status = 0;
+			*found_lun_pri = lun_info->priority;
 			if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
 				memset(vport_wwpn, 0x0,
 				       sizeof(struct lpfc_name));
@@ -5824,13 +5828,14 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
 	if (lun_info) {
 		if (!lun_info->oas_enabled)
 			lun_info->oas_enabled = true;
+		lun_info->priority = pri;
 		spin_unlock_irqrestore(&phba->devicelock, flags);
 		return true;
 	}
 
 	/* Create an lun info structure and add to list of luns */
 	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
-					   false);
+					   pri, false);
 	if (lun_info) {
 		lun_info->oas_enabled = true;
 		lun_info->priority = pri;
@@ -5864,7 +5869,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
  **/
 bool
 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
-		     struct lpfc_name *target_wwpn, uint64_t lun)
+		     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
 {
 
 	struct lpfc_device_data *lun_info;
@@ -5882,6 +5887,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
 					  target_wwpn, lun);
 	if (lun_info) {
 		lun_info->oas_enabled = false;
+		lun_info->priority = pri;
 		if (!lun_info->available)
 			lpfc_delete_device_data(phba, lun_info);
 		spin_unlock_irqrestore(&phba->devicelock, flags);
@@ -5923,6 +5929,7 @@ struct scsi_host_template lpfc_template = {
 	.proc_name		= LPFC_DRIVER_NAME,
 	.info			= lpfc_info,
 	.queuecommand		= lpfc_queuecommand,
+	.eh_timed_out		= fc_eh_timed_out,
 	.eh_abort_handler	= lpfc_abort_handler,
 	.eh_device_reset_handler = lpfc_device_reset_handler,
 	.eh_target_reset_handler = lpfc_target_reset_handler,
@@ -5949,6 +5956,7 @@ struct scsi_host_template lpfc_vport_template = {
 	.proc_name		= LPFC_DRIVER_NAME,
 	.info			= lpfc_info,
 	.queuecommand		= lpfc_queuecommand,
+	.eh_timed_out		= fc_eh_timed_out,
 	.eh_abort_handler	= lpfc_abort_handler,
 	.eh_device_reset_handler = lpfc_device_reset_handler,
 	.eh_target_reset_handler = lpfc_target_reset_handler,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a78a3df68f67..d977a472f89f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -120,6 +120,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
 		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
 	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+	/* ensure WQE bcopy flushed before doorbell write */
+	wmb();
 
 	/* Update the host index before invoking device */
 	host_index = q->host_index;
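The new wmb() closes a real window: without it the CPU may reorder the copy of the work-queue entry with the subsequent doorbell write, letting the device fetch a stale or partially written entry. The barrier guarantees the payload is visible before the doorbell is rung. The general shape of the pattern, as a hedged sketch rather than the lpfc code itself:

#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

/* hypothetical ring-and-doorbell pair */
static void demo_post_entry(void *queue_slot, const void *entry,
			    size_t len, void __iomem *doorbell, u32 val)
{
	memcpy(queue_slot, entry, len);	/* stage the work-queue entry */
	wmb();				/* order the copy before the doorbell */
	writel(val, doorbell);		/* tell the device to fetch it */
}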
@@ -6313,7 +6315,8 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 			 LPFC_SLI4_MBX_EMBED);
 
 	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
-	mbox->u.mqe.un.set_host_data.param_len = 8;
+	mbox->u.mqe.un.set_host_data.param_len =
+					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
 	snprintf(mbox->u.mqe.un.set_host_data.data,
 		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
 		 "Linux %s v"LPFC_DRIVER_VERSION,
@@ -10035,6 +10038,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		iabt->ulpCommand = CMD_CLOSE_XRI_CN;
 
 	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+	abtsiocbp->vport = vport;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
 			 "0339 Abort xri x%x, original iotag x%x, "
@@ -17226,7 +17230,8 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 	unsigned long iflags = 0;
 	char *fail_msg = NULL;
 	struct lpfc_sglq *sglq;
-	union lpfc_wqe wqe;
+	union lpfc_wqe128 wqe128;
+	union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
 	uint32_t txq_cnt = 0;
 
 	spin_lock_irqsave(&pring->ring_lock, iflags);
@@ -17265,9 +17270,9 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 		piocbq->sli4_xritag = sglq->sli4_xritag;
 		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
 			fail_msg = "to convert bpl to sgl";
-		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
+		else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
 			fail_msg = "to convert iocb to wqe";
-		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
 			fail_msg = " - Wq is full";
 		else
 			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 50bfc43ebcb0..0ee0623a354c 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.2.0.2"
+#define LPFC_DRIVER_VERSION "11.2.0.4"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index c27f4b724547..e18bbc66e83b 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -537,6 +537,12 @@ enable_vport(struct fc_vport *fc_vport)
 
 	spin_lock_irq(shost->host_lock);
 	vport->load_flag |= FC_LOADING;
+	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+		spin_unlock_irq(shost->host_lock);
+		lpfc_issue_init_vpi(vport);
+		goto out;
+	}
+
 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 	spin_unlock_irq(shost->host_lock);
 
@@ -557,6 +563,8 @@ enable_vport(struct fc_vport *fc_vport)
 	} else {
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 	}
+
+out:
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
 			 "1827 Vport Enabled.\n");
 	return VPORT_OK;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index ccb68d12692c..196acc79714b 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -154,7 +154,7 @@ __asm__ __volatile__ \
 static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
                                 unsigned char *dst, int len)
 {
-	unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
+	u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
 	unsigned char *d = dst;
 	int n = len;
 	int transferred;
@@ -257,7 +257,7 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
                                  unsigned char *src, int len)
 {
 	unsigned char *s = src;
-	unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
+	u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
 	int n = len;
 	int transferred;
 
@@ -381,10 +381,10 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
 
 	hostdata = shost_priv(instance);
 	hostdata->base = pio_mem->start;
-	hostdata->io = (void *)pio_mem->start;
+	hostdata->io = (u8 __iomem *)pio_mem->start;
 
 	if (pdma_mem && setup_use_pdma)
-		hostdata->pdma_io = (void *)pdma_mem->start;
+		hostdata->pdma_io = (u8 __iomem *)pdma_mem->start;
 	else
 		host_flags |= FLAG_NO_PSEUDO_DMA;
 
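The mac_scsi hunks are a sparse-annotation cleanup: pointers that refer to device registers are typed u8 __iomem * instead of plain unsigned char *, so sparse can flag any dereference that bypasses the MMIO accessors. In sketch form (a hypothetical device; readb/writeb are the real accessors):

#include <linux/io.h>
#include <linux/types.h>

struct demo_dev {
	u8 __iomem *regs;	/* MMIO window, not ordinary memory */
};

static u8 demo_read_status(struct demo_dev *d)
{
	/* correct: go through the MMIO accessor */
	return readb(d->regs + 0x10);
}

static void demo_write_data(struct demo_dev *d, u8 v)
{
	writeb(v, d->regs + 0x14);
	/* a direct store like *(d->regs + 0x14) = v; would be flagged */
}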
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index fdd519c1dd57..e7e5974e1a2c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"06.812.07.00-rc1"
-#define MEGASAS_RELDATE				"August 22, 2016"
+#define MEGASAS_VERSION				"07.701.16.00-rc1"
+#define MEGASAS_RELDATE				"February 2, 2017"
 
 /*
  * Device IDs
@@ -56,6 +56,11 @@
 #define PCI_DEVICE_ID_LSI_INTRUDER_24		0x00cf
 #define PCI_DEVICE_ID_LSI_CUTLASS_52		0x0052
 #define PCI_DEVICE_ID_LSI_CUTLASS_53		0x0053
+#define PCI_DEVICE_ID_LSI_VENTURA		0x0014
+#define PCI_DEVICE_ID_LSI_HARPOON		0x0016
+#define PCI_DEVICE_ID_LSI_TOMCAT		0x0017
+#define PCI_DEVICE_ID_LSI_VENTURA_4PORT		0x001B
+#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT	0x001C
 
 /*
  * Intel HBA SSDIDs
@@ -100,7 +105,7 @@
  */
 
 /*
- * MFI stands for  MegaRAID SAS FW Interface. This is just a moniker for
+ * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for
  * protocol between the software and firmware. Commands are issued using
  * "message frames"
  */
@@ -690,6 +695,18 @@ struct MR_PD_INFO {
 	u8 reserved1[512-428];
 } __packed;
 
+/*
+ * Definition of structure used to expose attributes of VD or JBOD
+ * (this structure is to be filled by firmware when MR_DCMD_DRV_GET_TARGET_PROP
+ * is fired by driver)
+ */
+struct MR_TARGET_PROPERTIES {
+	u32    max_io_size_kb;
+	u32    device_qdepth;
+	u32    sector_size;
+	u8     reserved[500];
+} __packed;
+
  /*
  * defines the physical drive address structure
  */
@@ -728,7 +745,6 @@ struct megasas_pd_list {
 	u16             tid;
 	u8             driveType;
 	u8             driveState;
-	u8             interface;
 } __packed;
 
  /*
@@ -1312,7 +1328,55 @@ struct megasas_ctrl_info {
 #endif
 	} adapterOperations3;
 
-	u8          pad[0x800-0x7EC];
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u8 reserved:7;
+		/* Indicates whether the CPLD image is part of
+		 * the package and stored in flash
+		 */
+		u8 cpld_in_flash:1;
+#else
+		u8 cpld_in_flash:1;
+		u8 reserved:7;
+#endif
+		u8 reserved1[3];
+		/* Null terminated string. Has the version
+		 * information if cpld_in_flash = FALSE
+		 */
+		u8 userCodeDefinition[12];
+	} cpld;  /* Valid only if upgradableCPLD is TRUE */
+
+	struct {
+	#if defined(__BIG_ENDIAN_BITFIELD)
+		u16 reserved:8;
+		u16 fw_swaps_bbu_vpd_info:1;
+		u16 support_pd_map_target_id:1;
+		u16 support_ses_ctrl_in_multipathcfg:1;
+		u16 image_upload_supported:1;
+		u16 support_encrypted_mfc:1;
+		u16 supported_enc_algo:1;
+		u16 support_ibutton_less:1;
+		u16 ctrl_info_ext_supported:1;
+	#else
+
+		u16 ctrl_info_ext_supported:1;
+		u16 support_ibutton_less:1;
+		u16 supported_enc_algo:1;
+		u16 support_encrypted_mfc:1;
+		u16 image_upload_supported:1;
+		/* FW supports LUN based association and target port based */
+		u16 support_ses_ctrl_in_multipathcfg:1;
+		/* association for the SES device connected in multipath mode */
+		/* FW defines Jbod target Id within MR_PD_CFG_SEQ */
+		u16 support_pd_map_target_id:1;
+		/* FW swaps relevant fields in MR_BBU_VPD_INFO_FIXED to
+		 * provide the data in little endian order
+		 */
+		u16 fw_swaps_bbu_vpd_info:1;
+		u16 reserved:8;
+	#endif
+	} adapter_operations4;
+	u8 pad[0x800 - 0x7FE];	/* 0x7FE pad to 2K for expansion */
 } __packed;
 
 /*
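The new capability words follow the driver's existing convention for firmware-defined bit layouts: the same bits are declared twice, once per host bit order, selected by __BIG_ENDIAN_BITFIELD, so the structure maps onto the firmware's little-endian wire format on either kind of host. A minimal sketch of the idiom (a hypothetical two-flag register, not a MegaRAID structure):

#include <linux/types.h>
#include <asm/byteorder.h>

/* one 8-bit firmware register with two flags and six reserved bits */
struct demo_caps {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved:6;
	u8 flag_b:1;
	u8 flag_a:1;	/* wire bit 0 */
#else
	u8 flag_a:1;	/* wire bit 0 */
	u8 flag_b:1;
	u8 reserved:6;
#endif
} __packed;

Reversing the declaration order under __BIG_ENDIAN_BITFIELD is what keeps flag_a pinned to wire bit 0 regardless of how the compiler allocates bitfields on the host.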
@@ -1339,12 +1403,15 @@ struct megasas_ctrl_info {
1339 1403
1340#define MEGASAS_FW_BUSY 1 1404#define MEGASAS_FW_BUSY 1
1341 1405
1342#define VD_EXT_DEBUG 0 1406/* Driver's internal Logging levels*/
1407#define OCR_LOGS (1 << 0)
1343 1408
1344#define SCAN_PD_CHANNEL 0x1 1409#define SCAN_PD_CHANNEL 0x1
1345#define SCAN_VD_CHANNEL 0x2 1410#define SCAN_VD_CHANNEL 0x2
1346 1411
1347#define MEGASAS_KDUMP_QUEUE_DEPTH 100 1412#define MEGASAS_KDUMP_QUEUE_DEPTH 100
1413#define MR_LARGE_IO_MIN_SIZE (32 * 1024)
1414#define MR_R1_LDIO_PIGGYBACK_DEFAULT 4
1348 1415
1349enum MR_SCSI_CMD_TYPE { 1416enum MR_SCSI_CMD_TYPE {
1350 READ_WRITE_LDIO = 0, 1417 READ_WRITE_LDIO = 0,
@@ -1391,7 +1458,7 @@ enum FW_BOOT_CONTEXT {
1391 */ 1458 */
1392#define MEGASAS_INT_CMDS 32 1459#define MEGASAS_INT_CMDS 32
1393#define MEGASAS_SKINNY_INT_CMDS 5 1460#define MEGASAS_SKINNY_INT_CMDS 5
1394#define MEGASAS_FUSION_INTERNAL_CMDS 5 1461#define MEGASAS_FUSION_INTERNAL_CMDS 8
1395#define MEGASAS_FUSION_IOCTL_CMDS 3 1462#define MEGASAS_FUSION_IOCTL_CMDS 3
1396#define MEGASAS_MFI_IOCTL_CMDS 27 1463#define MEGASAS_MFI_IOCTL_CMDS 27
1397 1464
@@ -1429,13 +1496,19 @@ enum FW_BOOT_CONTEXT {
1429#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 1496#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
1430#define MR_MAX_MSIX_REG_ARRAY 16 1497#define MR_MAX_MSIX_REG_ARRAY 16
1431#define MR_RDPQ_MODE_OFFSET 0X00800000 1498#define MR_RDPQ_MODE_OFFSET 0X00800000
1499
1500#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT 16
1501#define MR_MAX_RAID_MAP_SIZE_MASK 0x1FF
1502#define MR_MIN_MAP_SIZE 0x10000
1503/* 64k */
1504
1432#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000 1505#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
1433 1506
1434/* 1507/*
1435* register set for both 1068 and 1078 controllers 1508* register set for both 1068 and 1078 controllers
1436* structure extended for 1078 registers 1509* structure extended for 1078 registers
1437*/ 1510*/
1438 1511
1439struct megasas_register_set { 1512struct megasas_register_set {
1440 u32 doorbell; /*0000h*/ 1513 u32 doorbell; /*0000h*/
1441 u32 fusion_seq_offset; /*0004h*/ 1514 u32 fusion_seq_offset; /*0004h*/
@@ -1471,14 +1544,14 @@ struct megasas_register_set {
1471 u32 outbound_scratch_pad ; /*00B0h*/ 1544 u32 outbound_scratch_pad ; /*00B0h*/
1472 u32 outbound_scratch_pad_2; /*00B4h*/ 1545 u32 outbound_scratch_pad_2; /*00B4h*/
1473 u32 outbound_scratch_pad_3; /*00B8h*/ 1546 u32 outbound_scratch_pad_3; /*00B8h*/
1547 u32 outbound_scratch_pad_4; /*00BCh*/
1474 1548
1475 u32 reserved_4; /*00BCh*/
1476 1549
1477 u32 inbound_low_queue_port ; /*00C0h*/ 1550 u32 inbound_low_queue_port ; /*00C0h*/
1478 1551
1479 u32 inbound_high_queue_port ; /*00C4h*/ 1552 u32 inbound_high_queue_port ; /*00C4h*/
1480 1553
1481 u32 reserved_5; /*00C8h*/ 1554 u32 inbound_single_queue_port; /*00C8h*/
1482 u32 res_6[11]; /*CCh*/ 1555 u32 res_6[11]; /*CCh*/
1483 u32 host_diag; 1556 u32 host_diag;
1484 u32 seq_offset; 1557 u32 seq_offset;
@@ -1544,33 +1617,35 @@ union megasas_sgl_frame {
1544typedef union _MFI_CAPABILITIES { 1617typedef union _MFI_CAPABILITIES {
1545 struct { 1618 struct {
1546#if defined(__BIG_ENDIAN_BITFIELD) 1619#if defined(__BIG_ENDIAN_BITFIELD)
1547 u32 reserved:20; 1620 u32 reserved:19;
1548 u32 support_qd_throttling:1; 1621 u32 support_pd_map_target_id:1;
1549 u32 support_fp_rlbypass:1; 1622 u32 support_qd_throttling:1;
1550 u32 support_vfid_in_ioframe:1; 1623 u32 support_fp_rlbypass:1;
1551 u32 support_ext_io_size:1; 1624 u32 support_vfid_in_ioframe:1;
1552 u32 support_ext_queue_depth:1; 1625 u32 support_ext_io_size:1;
1553 u32 security_protocol_cmds_fw:1; 1626 u32 support_ext_queue_depth:1;
1554 u32 support_core_affinity:1; 1627 u32 security_protocol_cmds_fw:1;
1555 u32 support_ndrive_r1_lb:1; 1628 u32 support_core_affinity:1;
1556 u32 support_max_255lds:1; 1629 u32 support_ndrive_r1_lb:1;
1557 u32 support_fastpath_wb:1; 1630 u32 support_max_255lds:1;
1558 u32 support_additional_msix:1; 1631 u32 support_fastpath_wb:1;
1559 u32 support_fp_remote_lun:1; 1632 u32 support_additional_msix:1;
1633 u32 support_fp_remote_lun:1;
1560#else 1634#else
1561 u32 support_fp_remote_lun:1; 1635 u32 support_fp_remote_lun:1;
1562 u32 support_additional_msix:1; 1636 u32 support_additional_msix:1;
1563 u32 support_fastpath_wb:1; 1637 u32 support_fastpath_wb:1;
1564 u32 support_max_255lds:1; 1638 u32 support_max_255lds:1;
1565 u32 support_ndrive_r1_lb:1; 1639 u32 support_ndrive_r1_lb:1;
1566 u32 support_core_affinity:1; 1640 u32 support_core_affinity:1;
1567 u32 security_protocol_cmds_fw:1; 1641 u32 security_protocol_cmds_fw:1;
1568 u32 support_ext_queue_depth:1; 1642 u32 support_ext_queue_depth:1;
1569 u32 support_ext_io_size:1; 1643 u32 support_ext_io_size:1;
1570 u32 support_vfid_in_ioframe:1; 1644 u32 support_vfid_in_ioframe:1;
1571 u32 support_fp_rlbypass:1; 1645 u32 support_fp_rlbypass:1;
1572 u32 support_qd_throttling:1; 1646 u32 support_qd_throttling:1;
1573 u32 reserved:20; 1647 u32 support_pd_map_target_id:1;
1648 u32 reserved:19;
1574#endif 1649#endif
1575 } mfi_capabilities; 1650 } mfi_capabilities;
1576 __le32 reg; 1651 __le32 reg;
@@ -1803,6 +1878,8 @@ union megasas_frame {
1803struct MR_PRIV_DEVICE { 1878struct MR_PRIV_DEVICE {
1804 bool is_tm_capable; 1879 bool is_tm_capable;
1805 bool tm_busy; 1880 bool tm_busy;
1881 atomic_t r1_ldio_hint;
1882 u8 interface_type;
1806}; 1883};
1807struct megasas_cmd; 1884struct megasas_cmd;
1808 1885
@@ -1994,17 +2071,24 @@ struct MR_DRV_SYSTEM_INFO {
1994}; 2071};
1995 2072
1996enum MR_PD_TYPE { 2073enum MR_PD_TYPE {
1997 UNKNOWN_DRIVE = 0, 2074 UNKNOWN_DRIVE = 0,
1998 PARALLEL_SCSI = 1, 2075 PARALLEL_SCSI = 1,
1999 SAS_PD = 2, 2076 SAS_PD = 2,
2000 SATA_PD = 3, 2077 SATA_PD = 3,
2001 FC_PD = 4, 2078 FC_PD = 4,
2079 NVME_PD = 5,
2002}; 2080};
2003 2081
2004/* JBOD Queue depth definitions */ 2082/* JBOD Queue depth definitions */
2005#define MEGASAS_SATA_QD 32 2083#define MEGASAS_SATA_QD 32
2006#define MEGASAS_SAS_QD 64 2084#define MEGASAS_SAS_QD 64
2007#define MEGASAS_DEFAULT_PD_QD 64 2085#define MEGASAS_DEFAULT_PD_QD 64
2086#define MEGASAS_NVME_QD 32
2087
2088#define MR_DEFAULT_NVME_PAGE_SIZE 4096
2089#define MR_DEFAULT_NVME_PAGE_SHIFT 12
2090#define MR_DEFAULT_NVME_MDTS_KB 128
2091#define MR_NVME_PAGE_SIZE_MASK 0x000000FF
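A worked conversion with the defaults above (a sketch, not driver code): MDTS is reported in units of the NVMe page size, so MDTS=5 with a 4K page gives 2^5 * 4096 = 128K, which firmware hands to the driver as 128 KB:

	u32 max_io_size  = MR_DEFAULT_NVME_MDTS_KB << 10;	/* 128 << 10 = 131072 bytes */
	u32 max_hw_sects = max_io_size / 512;			/* 256 sectors */
	u32 nvme_pg_size = 1 << MR_DEFAULT_NVME_PAGE_SHIFT;	/* 1 << 12 = 4096 */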
2008 2092
2009struct megasas_instance { 2093struct megasas_instance {
2010 2094
@@ -2022,6 +2106,8 @@ struct megasas_instance {
2022 dma_addr_t hb_host_mem_h; 2106 dma_addr_t hb_host_mem_h;
2023 struct MR_PD_INFO *pd_info; 2107 struct MR_PD_INFO *pd_info;
2024 dma_addr_t pd_info_h; 2108 dma_addr_t pd_info_h;
2109 struct MR_TARGET_PROPERTIES *tgt_prop;
2110 dma_addr_t tgt_prop_h;
2025 2111
2026 __le32 *reply_queue; 2112 __le32 *reply_queue;
2027 dma_addr_t reply_queue_h; 2113 dma_addr_t reply_queue_h;
@@ -2039,6 +2125,7 @@ struct megasas_instance {
2039 u32 crash_dump_drv_support; 2125 u32 crash_dump_drv_support;
2040 u32 crash_dump_app_support; 2126 u32 crash_dump_app_support;
2041 u32 secure_jbod_support; 2127 u32 secure_jbod_support;
2128 u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
2042 bool use_seqnum_jbod_fp; /* Added for PD sequence */ 2129 bool use_seqnum_jbod_fp; /* Added for PD sequence */
2043 spinlock_t crashdump_lock; 2130 spinlock_t crashdump_lock;
2044 2131
@@ -2051,6 +2138,7 @@ struct megasas_instance {
2051 2138
2052 u16 max_num_sge; 2139 u16 max_num_sge;
2053 u16 max_fw_cmds; 2140 u16 max_fw_cmds;
2141 u16 max_mpt_cmds;
2054 u16 max_mfi_cmds; 2142 u16 max_mfi_cmds;
2055 u16 max_scsi_cmds; 2143 u16 max_scsi_cmds;
2056 u16 ldio_threshold; 2144 u16 ldio_threshold;
@@ -2065,6 +2153,7 @@ struct megasas_instance {
2065 /* used to sync fire the cmd to fw */ 2153 /* used to sync fire the cmd to fw */
2066 spinlock_t hba_lock; 2154 spinlock_t hba_lock;
2067 /* used to synch producer, consumer ptrs in dpc */ 2155 /* used to synch producer, consumer ptrs in dpc */
2156 spinlock_t stream_lock;
2068 spinlock_t completion_lock; 2157 spinlock_t completion_lock;
2069 struct dma_pool *frame_dma_pool; 2158 struct dma_pool *frame_dma_pool;
2070 struct dma_pool *sense_dma_pool; 2159 struct dma_pool *sense_dma_pool;
@@ -2087,6 +2176,11 @@ struct megasas_instance {
2087 atomic_t fw_outstanding; 2176 atomic_t fw_outstanding;
2088 atomic_t ldio_outstanding; 2177 atomic_t ldio_outstanding;
2089 atomic_t fw_reset_no_pci_access; 2178 atomic_t fw_reset_no_pci_access;
2179 atomic_t ieee_sgl;
2180 atomic_t prp_sgl;
2181 atomic_t sge_holes_type1;
2182 atomic_t sge_holes_type2;
2183 atomic_t sge_holes_type3;
2090 2184
2091 struct megasas_instance_template *instancet; 2185 struct megasas_instance_template *instancet;
2092 struct tasklet_struct isr_tasklet; 2186 struct tasklet_struct isr_tasklet;
@@ -2142,6 +2236,13 @@ struct megasas_instance {
2142 u8 is_rdpq; 2236 u8 is_rdpq;
2143 bool dev_handle; 2237 bool dev_handle;
2144 bool fw_sync_cache_support; 2238 bool fw_sync_cache_support;
2239 u32 mfi_frame_size;
2240 bool is_ventura;
2241 bool msix_combined;
2242 u16 max_raid_mapsize;
2243 /* preferred count to send as LDIO irrespective of FP capable. */
2244 u8 r1_ldio_hint_default;
2245 u32 nvme_page_size;
2145}; 2246};
2146struct MR_LD_VF_MAP { 2247struct MR_LD_VF_MAP {
2147 u32 size; 2248 u32 size;
@@ -2230,12 +2331,12 @@ struct megasas_instance_template {
2230 u32 (*init_adapter)(struct megasas_instance *); 2331 u32 (*init_adapter)(struct megasas_instance *);
2231 u32 (*build_and_issue_cmd) (struct megasas_instance *, 2332 u32 (*build_and_issue_cmd) (struct megasas_instance *,
2232 struct scsi_cmnd *); 2333 struct scsi_cmnd *);
2233 int (*issue_dcmd)(struct megasas_instance *instance, 2334 void (*issue_dcmd)(struct megasas_instance *instance,
2234 struct megasas_cmd *cmd); 2335 struct megasas_cmd *cmd);
2235}; 2336};
2236 2337
2237#define MEGASAS_IS_LOGICAL(scp) \ 2338#define MEGASAS_IS_LOGICAL(sdev) \
2238 ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) 2339 ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
2239 2340
2240#define MEGASAS_DEV_INDEX(scp) \ 2341#define MEGASAS_DEV_INDEX(scp) \
2241 (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ 2342 (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
@@ -2346,7 +2447,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
2346 struct IO_REQUEST_INFO *io_info, 2447 struct IO_REQUEST_INFO *io_info,
2347 struct RAID_CONTEXT *pRAID_Context, 2448 struct RAID_CONTEXT *pRAID_Context,
2348 struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN); 2449 struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
2349u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map); 2450u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
2350struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); 2451struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
2351u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map); 2452u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
2352u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map); 2453u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
@@ -2354,13 +2455,16 @@ __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
2354u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); 2455u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
2355 2456
2356__le16 get_updated_dev_handle(struct megasas_instance *instance, 2457__le16 get_updated_dev_handle(struct megasas_instance *instance,
2357 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); 2458 struct LD_LOAD_BALANCE_INFO *lbInfo,
2459 struct IO_REQUEST_INFO *in_info,
2460 struct MR_DRV_RAID_MAP_ALL *drv_map);
2358void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, 2461void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
2359 struct LD_LOAD_BALANCE_INFO *lbInfo); 2462 struct LD_LOAD_BALANCE_INFO *lbInfo);
2360int megasas_get_ctrl_info(struct megasas_instance *instance); 2463int megasas_get_ctrl_info(struct megasas_instance *instance);
2361/* PD sequence */ 2464/* PD sequence */
2362int 2465int
2363megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend); 2466megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend);
2467void megasas_set_dynamic_target_properties(struct scsi_device *sdev);
2364int megasas_set_crash_dump_params(struct megasas_instance *instance, 2468int megasas_set_crash_dump_params(struct megasas_instance *instance,
2365 u8 crash_buf_state); 2469 u8 crash_buf_state);
2366void megasas_free_host_crash_buffer(struct megasas_instance *instance); 2470void megasas_free_host_crash_buffer(struct megasas_instance *instance);
@@ -2382,4 +2486,7 @@ void megasas_update_sdev_properties(struct scsi_device *sdev);
2382int megasas_reset_fusion(struct Scsi_Host *shost, int reason); 2486int megasas_reset_fusion(struct Scsi_Host *shost, int reason);
2383int megasas_task_abort_fusion(struct scsi_cmnd *scmd); 2487int megasas_task_abort_fusion(struct scsi_cmnd *scmd);
2384int megasas_reset_target_fusion(struct scsi_cmnd *scmd); 2488int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
2489u32 mega_mod64(u64 dividend, u32 divisor);
2490int megasas_alloc_fusion_context(struct megasas_instance *instance);
2491void megasas_free_fusion_context(struct megasas_instance *instance);
2385#endif /*LSI_MEGARAID_SAS_H */ 2492#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d5cf15eb8c5e..7ac9a9ee9bd4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -43,6 +43,7 @@
43#include <linux/uio.h> 43#include <linux/uio.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/uaccess.h> 45#include <linux/uaccess.h>
46#include <asm/unaligned.h>
46#include <linux/fs.h> 47#include <linux/fs.h>
47#include <linux/compat.h> 48#include <linux/compat.h>
48#include <linux/blkdev.h> 49#include <linux/blkdev.h>
@@ -116,8 +117,10 @@ static int megasas_ld_list_query(struct megasas_instance *instance,
116static int megasas_issue_init_mfi(struct megasas_instance *instance); 117static int megasas_issue_init_mfi(struct megasas_instance *instance);
117static int megasas_register_aen(struct megasas_instance *instance, 118static int megasas_register_aen(struct megasas_instance *instance,
118 u32 seq_num, u32 class_locale_word); 119 u32 seq_num, u32 class_locale_word);
119static int 120static void megasas_get_pd_info(struct megasas_instance *instance,
120megasas_get_pd_info(struct megasas_instance *instance, u16 device_id); 121 struct scsi_device *sdev);
122static int megasas_get_target_prop(struct megasas_instance *instance,
123 struct scsi_device *sdev);
121/* 124/*
122 * PCI ID table for all supported controllers 125 * PCI ID table for all supported controllers
123 */ 126 */
@@ -155,6 +158,12 @@ static struct pci_device_id megasas_pci_table[] = {
155 /* Intruder 24 port*/ 158 /* Intruder 24 port*/
156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, 159 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
157 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, 160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
161 /* VENTURA */
162 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
165 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
158 {} 167 {}
159}; 168};
160 169
@@ -196,12 +205,12 @@ void megasas_fusion_ocr_wq(struct work_struct *work);
196static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 205static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
197 int initial); 206 int initial);
198 207
199int 208void
200megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 209megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
201{ 210{
202 instance->instancet->fire_cmd(instance, 211 instance->instancet->fire_cmd(instance,
203 cmd->frame_phys_addr, 0, instance->reg_set); 212 cmd->frame_phys_addr, 0, instance->reg_set);
204 return 0; 213 return;
205} 214}
206 215
207/** 216/**
@@ -259,6 +268,8 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
259 cmd->scmd = NULL; 268 cmd->scmd = NULL;
260 cmd->frame_count = 0; 269 cmd->frame_count = 0;
261 cmd->flags = 0; 270 cmd->flags = 0;
271 memset(cmd->frame, 0, instance->mfi_frame_size);
272 cmd->frame->io.context = cpu_to_le32(cmd->index);
262 if (!fusion && reset_devices) 273 if (!fusion && reset_devices)
263 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 274 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
264 list_add(&cmd->list, (&instance->cmd_pool)->next); 275 list_add(&cmd->list, (&instance->cmd_pool)->next);
@@ -989,13 +1000,14 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
989 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS; 1000 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
990 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 1001 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
991 1002
992 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || 1003 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
993 (instance->instancet->issue_dcmd(instance, cmd))) {
994 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1004 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
995 __func__, __LINE__); 1005 __func__, __LINE__);
996 return DCMD_NOT_FIRED; 1006 return DCMD_NOT_FIRED;
997 } 1007 }
998 1008
1009 instance->instancet->issue_dcmd(instance, cmd);
1010
999 return wait_and_poll(instance, cmd, instance->requestorId ? 1011 return wait_and_poll(instance, cmd, instance->requestorId ?
1000 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS); 1012 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1001} 1013}
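Since issue_dcmd() now returns void, the three issue paths touched by this patch share the same shape; a condensed sketch of the new flow, using the names from the hunk above:

	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
		return DCMD_NOT_FIRED;	/* the only up-front failure left */

	instance->instancet->issue_dcmd(instance, cmd);
	return wait_and_poll(instance, cmd, timeout);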
@@ -1017,13 +1029,14 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
1017 int ret = 0; 1029 int ret = 0;
1018 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 1030 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1019 1031
1020 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || 1032 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1021 (instance->instancet->issue_dcmd(instance, cmd))) {
1022 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1033 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1023 __func__, __LINE__); 1034 __func__, __LINE__);
1024 return DCMD_NOT_FIRED; 1035 return DCMD_NOT_FIRED;
1025 } 1036 }
1026 1037
1038 instance->instancet->issue_dcmd(instance, cmd);
1039
1027 if (timeout) { 1040 if (timeout) {
1028 ret = wait_event_timeout(instance->int_cmd_wait_q, 1041 ret = wait_event_timeout(instance->int_cmd_wait_q,
1029 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); 1042 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
@@ -1081,13 +1094,14 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1081 cmd->sync_cmd = 1; 1094 cmd->sync_cmd = 1;
1082 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 1095 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1083 1096
1084 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || 1097 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1085 (instance->instancet->issue_dcmd(instance, cmd))) {
1086 dev_err(&instance->pdev->dev, "Failed from %s %d\n", 1098 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1087 __func__, __LINE__); 1099 __func__, __LINE__);
1088 return DCMD_NOT_FIRED; 1100 return DCMD_NOT_FIRED;
1089 } 1101 }
1090 1102
1103 instance->instancet->issue_dcmd(instance, cmd);
1104
1091 if (timeout) { 1105 if (timeout) {
1092 ret = wait_event_timeout(instance->abort_cmd_wait_q, 1106 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1093 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); 1107 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
@@ -1273,7 +1287,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1273 u16 flags = 0; 1287 u16 flags = 0;
1274 struct megasas_pthru_frame *pthru; 1288 struct megasas_pthru_frame *pthru;
1275 1289
1276 is_logical = MEGASAS_IS_LOGICAL(scp); 1290 is_logical = MEGASAS_IS_LOGICAL(scp->device);
1277 device_id = MEGASAS_DEV_INDEX(scp); 1291 device_id = MEGASAS_DEV_INDEX(scp);
1278 pthru = (struct megasas_pthru_frame *)cmd->frame; 1292 pthru = (struct megasas_pthru_frame *)cmd->frame;
1279 1293
@@ -1513,11 +1527,11 @@ inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1513 case WRITE_6: 1527 case WRITE_6:
1514 case READ_16: 1528 case READ_16:
1515 case WRITE_16: 1529 case WRITE_16:
1516 ret = (MEGASAS_IS_LOGICAL(cmd)) ? 1530 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1517 READ_WRITE_LDIO : READ_WRITE_SYSPDIO; 1531 READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1518 break; 1532 break;
1519 default: 1533 default:
1520 ret = (MEGASAS_IS_LOGICAL(cmd)) ? 1534 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1521 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; 1535 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1522 } 1536 }
1523 return ret; 1537 return ret;
@@ -1537,7 +1551,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
1537 struct megasas_io_frame *ldio; 1551 struct megasas_io_frame *ldio;
1538 struct megasas_pthru_frame *pthru; 1552 struct megasas_pthru_frame *pthru;
1539 u32 sgcount; 1553 u32 sgcount;
1540 u32 max_cmd = instance->max_fw_cmds; 1554 u16 max_cmd = instance->max_fw_cmds;
1541 1555
1542 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); 1556 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1543 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); 1557 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
@@ -1662,7 +1676,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1662 /* Check for an mpio path and adjust behavior */ 1676 /* Check for an mpio path and adjust behavior */
1663 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { 1677 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1664 if (megasas_check_mpio_paths(instance, scmd) == 1678 if (megasas_check_mpio_paths(instance, scmd) ==
1665 (DID_RESET << 16)) { 1679 (DID_REQUEUE << 16)) {
1666 return SCSI_MLQUEUE_HOST_BUSY; 1680 return SCSI_MLQUEUE_HOST_BUSY;
1667 } else { 1681 } else {
1668 scmd->result = DID_NO_CONNECT << 16; 1682 scmd->result = DID_NO_CONNECT << 16;
@@ -1693,15 +1707,16 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1693 1707
1694 scmd->result = 0; 1708 scmd->result = 0;
1695 1709
1696 if (MEGASAS_IS_LOGICAL(scmd) && 1710 if (MEGASAS_IS_LOGICAL(scmd->device) &&
1697 (scmd->device->id >= instance->fw_supported_vd_count || 1711 (scmd->device->id >= instance->fw_supported_vd_count ||
1698 scmd->device->lun)) { 1712 scmd->device->lun)) {
1699 scmd->result = DID_BAD_TARGET << 16; 1713 scmd->result = DID_BAD_TARGET << 16;
1700 goto out_done; 1714 goto out_done;
1701 } 1715 }
1702 1716
1703 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) && 1717 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1704 (!instance->fw_sync_cache_support)) { 1718 MEGASAS_IS_LOGICAL(scmd->device) &&
1719 (!instance->fw_sync_cache_support)) {
1705 scmd->result = DID_OK << 16; 1720 scmd->result = DID_OK << 16;
1706 goto out_done; 1721 goto out_done;
1707 } 1722 }
@@ -1728,16 +1743,21 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1728} 1743}
1729 1744
1730/* 1745/*
1731* megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities 1746* megasas_set_dynamic_target_properties -
1747* Device properties set by the driver may not be static and are required to be
1748* updated after OCR.
1749*
1750* set tm_capable.
1751* set dma alignment (only for EEDP-protection-enabled VD).
1732* 1752*
1733* @sdev: OS provided scsi device 1753* @sdev: OS provided scsi device
1734* 1754*
1735* Returns void 1755* Returns void
1736*/ 1756*/
1737void megasas_update_sdev_properties(struct scsi_device *sdev) 1757void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
1738{ 1758{
1739 u16 pd_index = 0; 1759 u16 pd_index = 0, ld;
1740 u32 device_id, ld; 1760 u32 device_id;
1741 struct megasas_instance *instance; 1761 struct megasas_instance *instance;
1742 struct fusion_context *fusion; 1762 struct fusion_context *fusion;
1743 struct MR_PRIV_DEVICE *mr_device_priv_data; 1763 struct MR_PRIV_DEVICE *mr_device_priv_data;
@@ -1749,67 +1769,129 @@ void megasas_update_sdev_properties(struct scsi_device *sdev)
1749 fusion = instance->ctrl_context; 1769 fusion = instance->ctrl_context;
1750 mr_device_priv_data = sdev->hostdata; 1770 mr_device_priv_data = sdev->hostdata;
1751 1771
1752 if (!fusion) 1772 if (!fusion || !mr_device_priv_data)
1753 return; 1773 return;
1754 1774
1755 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && 1775 if (MEGASAS_IS_LOGICAL(sdev)) {
1756 instance->use_seqnum_jbod_fp) {
1757 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1758 sdev->id;
1759 pd_sync = (void *)fusion->pd_seq_sync
1760 [(instance->pd_seq_map_id - 1) & 1];
1761 mr_device_priv_data->is_tm_capable =
1762 pd_sync->seq[pd_index].capability.tmCapable;
1763 } else {
1764 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) 1776 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1765 + sdev->id; 1777 + sdev->id;
1766 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1778 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1767 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1779 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1780 if (ld >= instance->fw_supported_vd_count)
1781 return;
1768 raid = MR_LdRaidGet(ld, local_map_ptr); 1782 raid = MR_LdRaidGet(ld, local_map_ptr);
1769 1783
1770 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) 1784 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1771 blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1785 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1786
1772 mr_device_priv_data->is_tm_capable = 1787 mr_device_priv_data->is_tm_capable =
1773 raid->capability.tmCapable; 1788 raid->capability.tmCapable;
1789 } else if (instance->use_seqnum_jbod_fp) {
1790 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1791 sdev->id;
1792 pd_sync = (void *)fusion->pd_seq_sync
1793 [(instance->pd_seq_map_id - 1) & 1];
1794 mr_device_priv_data->is_tm_capable =
1795 pd_sync->seq[pd_index].capability.tmCapable;
1774 } 1796 }
1775} 1797}
1776 1798
1777static void megasas_set_device_queue_depth(struct scsi_device *sdev) 1799/*
1800 * megasas_set_nvme_device_properties -
1801 * set nomerges=2
1802 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1803 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1804 *
1805 * MR firmware provides the value in KB. The caller of this function
1806 * converts KB into bytes.
1807 *
1808 * e.g. MDTS=5 means 2^5 * NVMe page size. (In case of a 4K page size,
1809 * MR firmware provides the value 128, as 32 * 4K = 128K.)
1810 *
1811 * @sdev: scsi device
1812 * @max_io_size: maximum io transfer size
1813 *
1814 */
1815static inline void
1816megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1778{ 1817{
1779 u16 pd_index = 0;
1780 int ret = DCMD_FAILED;
1781 struct megasas_instance *instance; 1818 struct megasas_instance *instance;
1819 u32 mr_nvme_pg_size;
1782 1820
1783 instance = megasas_lookup_instance(sdev->host->host_no); 1821 instance = (struct megasas_instance *)sdev->host->hostdata;
1822 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1823 MR_DEFAULT_NVME_PAGE_SIZE);
1784 1824
1785 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { 1825 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1786 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1787 1826
1788 if (instance->pd_info) { 1827 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1789 mutex_lock(&instance->hba_mutex); 1828 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1790 ret = megasas_get_pd_info(instance, pd_index); 1829}
1791 mutex_unlock(&instance->hba_mutex);
1792 }
1793 1830
1794 if (ret != DCMD_SUCCESS)
1795 return;
1796 1831
1797 if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { 1832/*
1833 * megasas_set_static_target_properties -
1834 * Device property set by driver are static and it is not required to be
1835 * updated after OCR.
1836 *
1837 * set io timeout
1838 * set device queue depth
1839 * set nvme device properties. see - megasas_set_nvme_device_properties
1840 *
1841 * @sdev: scsi device
1842 * @is_target_prop true, if fw provided target properties.
1843 */
1844static void megasas_set_static_target_properties(struct scsi_device *sdev,
1845 bool is_target_prop)
1846{
1847 u16 target_index = 0;
1848 u8 interface_type;
1849 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1850 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1851 u32 tgt_device_qd;
1852 struct megasas_instance *instance;
1853 struct MR_PRIV_DEVICE *mr_device_priv_data;
1798 1854
1799 switch (instance->pd_list[pd_index].interface) { 1855 instance = megasas_lookup_instance(sdev->host->host_no);
1800 case SAS_PD: 1856 mr_device_priv_data = sdev->hostdata;
1801 scsi_change_queue_depth(sdev, MEGASAS_SAS_QD); 1857 interface_type = mr_device_priv_data->interface_type;
1802 break;
1803 1858
1804 case SATA_PD: 1859 /*
1805 scsi_change_queue_depth(sdev, MEGASAS_SATA_QD); 1860 * The RAID firmware may require extended timeouts.
1806 break; 1861 */
1862 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1807 1863
1808 default: 1864 target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1809 scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD); 1865
1810 } 1866 switch (interface_type) {
1811 } 1867 case SAS_PD:
1868 device_qd = MEGASAS_SAS_QD;
1869 break;
1870 case SATA_PD:
1871 device_qd = MEGASAS_SATA_QD;
1872 break;
1873 case NVME_PD:
1874 device_qd = MEGASAS_NVME_QD;
1875 break;
1876 }
1877
1878 if (is_target_prop) {
1879 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1880 if (tgt_device_qd &&
1881 (tgt_device_qd <= instance->host->can_queue))
1882 device_qd = tgt_device_qd;
1883
1884 /* max_io_size_kb will be set to non-zero for
1885 * NVMe-based VD and syspd.
1886 */
1887 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1812 } 1888 }
1889
1890 if (instance->nvme_page_size && max_io_size_kb)
1891 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1892
1893 scsi_change_queue_depth(sdev, device_qd);
1894
1813} 1895}
1814 1896
1815 1897
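The hunk above splits per-device setup into a static half (queue depth, timeout, NVMe limits, set once) and a dynamic half (tm_capable, DMA alignment, refreshed after OCR). A condensed sketch of the queue-depth policy, assuming the interface types and defines introduced earlier in the header:

	u32 qd = MEGASAS_DEFAULT_CMD_PER_LUN;

	switch (interface_type) {
	case SAS_PD:  qd = MEGASAS_SAS_QD;  break;	/* 64 */
	case SATA_PD: qd = MEGASAS_SATA_QD; break;	/* 32 */
	case NVME_PD: qd = MEGASAS_NVME_QD; break;	/* 32 */
	}

	/* A firmware-reported per-target depth wins if it fits under can_queue. */
	if (is_target_prop && tgt_device_qd && tgt_device_qd <= can_queue)
		qd = tgt_device_qd;

	scsi_change_queue_depth(sdev, qd);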
@@ -1817,11 +1899,12 @@ static int megasas_slave_configure(struct scsi_device *sdev)
1817{ 1899{
1818 u16 pd_index = 0; 1900 u16 pd_index = 0;
1819 struct megasas_instance *instance; 1901 struct megasas_instance *instance;
1902 int ret_target_prop = DCMD_FAILED;
1903 bool is_target_prop = false;
1820 1904
1821 instance = megasas_lookup_instance(sdev->host->host_no); 1905 instance = megasas_lookup_instance(sdev->host->host_no);
1822 if (instance->pd_list_not_supported) { 1906 if (instance->pd_list_not_supported) {
1823 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && 1907 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
1824 sdev->type == TYPE_DISK) {
1825 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1908 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1826 sdev->id; 1909 sdev->id;
1827 if (instance->pd_list[pd_index].driveState != 1910 if (instance->pd_list[pd_index].driveState !=
@@ -1829,14 +1912,25 @@ static int megasas_slave_configure(struct scsi_device *sdev)
1829 return -ENXIO; 1912 return -ENXIO;
1830 } 1913 }
1831 } 1914 }
1832 megasas_set_device_queue_depth(sdev);
1833 megasas_update_sdev_properties(sdev);
1834 1915
1835 /* 1916 mutex_lock(&instance->hba_mutex);
1836 * The RAID firmware may require extended timeouts. 1917 /* Send DCMD to Firmware and cache the information */
1918 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1919 megasas_get_pd_info(instance, sdev);
1920
1921 /* Some Ventura firmware may not have instance->nvme_page_size set.
1922 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
1837 */ 1923 */
1838 blk_queue_rq_timeout(sdev->request_queue, 1924 if ((instance->tgt_prop) && (instance->nvme_page_size))
1839 scmd_timeout * HZ); 1925 ret_target_prop = megasas_get_target_prop(instance, sdev);
1926
1927 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
1928 megasas_set_static_target_properties(sdev, is_target_prop);
1929
1930 mutex_unlock(&instance->hba_mutex);
1931
1932 /* This sdev property may change post OCR */
1933 megasas_set_dynamic_target_properties(sdev);
1840 1934
1841 return 0; 1935 return 0;
1842} 1936}
@@ -1848,7 +1942,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
1848 struct MR_PRIV_DEVICE *mr_device_priv_data; 1942 struct MR_PRIV_DEVICE *mr_device_priv_data;
1849 1943
1850 instance = megasas_lookup_instance(sdev->host->host_no); 1944 instance = megasas_lookup_instance(sdev->host->host_no);
1851 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { 1945 if (!MEGASAS_IS_LOGICAL(sdev)) {
1852 /* 1946 /*
1853 * Open the OS scan to the SYSTEM PD 1947 * Open the OS scan to the SYSTEM PD
1854 */ 1948 */
@@ -2483,7 +2577,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2483 struct megasas_cmd, list); 2577 struct megasas_cmd, list);
2484 list_del_init(&reset_cmd->list); 2578 list_del_init(&reset_cmd->list);
2485 if (reset_cmd->scmd) { 2579 if (reset_cmd->scmd) {
2486 reset_cmd->scmd->result = DID_RESET << 16; 2580 reset_cmd->scmd->result = DID_REQUEUE << 16;
2487 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", 2581 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2488 reset_index, reset_cmd, 2582 reset_index, reset_cmd,
2489 reset_cmd->scmd->cmnd[0]); 2583 reset_cmd->scmd->cmnd[0]);
@@ -2651,6 +2745,24 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2651} 2745}
2652 2746
2653/** 2747/**
2748 * megasas_dump_frame - Dump an MPT/MFI request frame
2749 */
2750static inline void
2751megasas_dump_frame(void *mpi_request, int sz)
2752{
2753 int i;
2754 __le32 *mfp = (__le32 *)mpi_request;
2755
2756 printk(KERN_INFO "IO request frame:\n\t");
2757 for (i = 0; i < sz / sizeof(__le32); i++) {
2758 if (i && ((i % 8) == 0))
2759 printk("\n\t");
2760 printk("%08x ", le32_to_cpu(mfp[i]));
2761 }
2762 printk("\n");
2763}
2764
2765/**
2654 * megasas_reset_bus_host - Bus & host reset handler entry point 2766 * megasas_reset_bus_host - Bus & host reset handler entry point
2655 */ 2767 */
2656static int megasas_reset_bus_host(struct scsi_cmnd *scmd) 2768static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
@@ -2660,12 +2772,26 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2660 2772
2661 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2773 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2662 2774
2775 scmd_printk(KERN_INFO, scmd,
2776 "Controller reset is requested due to IO timeout\n"
2777 "SCSI command pointer: (%p)\t SCSI host state: %d\t"
2778 " SCSI host busy: %d\t FW outstanding: %d\n",
2779 scmd, scmd->device->host->shost_state,
2780 atomic_read((atomic_t *)&scmd->device->host->host_busy),
2781 atomic_read(&instance->fw_outstanding));
2782
2663 /* 2783 /*
2664 * First wait for all commands to complete 2784 * First wait for all commands to complete
2665 */ 2785 */
2666 if (instance->ctrl_context) 2786 if (instance->ctrl_context) {
2667 ret = megasas_reset_fusion(scmd->device->host, 1); 2787 struct megasas_cmd_fusion *cmd;
2668 else 2788 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2789 if (cmd)
2790 megasas_dump_frame(cmd->io_request,
2791 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
2792 ret = megasas_reset_fusion(scmd->device->host,
2793 SCSIIO_TIMEOUT_OCR);
2794 } else
2669 ret = megasas_generic_reset(scmd); 2795 ret = megasas_generic_reset(scmd);
2670 2796
2671 return ret; 2797 return ret;
@@ -3343,7 +3469,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3343{ 3469{
3344 struct megasas_cmd *cmd; 3470 struct megasas_cmd *cmd;
3345 int i; 3471 int i;
3346 u32 max_cmd = instance->max_fw_cmds; 3472 u16 max_cmd = instance->max_fw_cmds;
3347 u32 defer_index; 3473 u32 defer_index;
3348 unsigned long flags; 3474 unsigned long flags;
3349 3475
@@ -3719,7 +3845,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3719static void megasas_teardown_frame_pool(struct megasas_instance *instance) 3845static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3720{ 3846{
3721 int i; 3847 int i;
3722 u32 max_cmd = instance->max_mfi_cmds; 3848 u16 max_cmd = instance->max_mfi_cmds;
3723 struct megasas_cmd *cmd; 3849 struct megasas_cmd *cmd;
3724 3850
3725 if (!instance->frame_dma_pool) 3851 if (!instance->frame_dma_pool)
@@ -3763,9 +3889,8 @@ static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3763static int megasas_create_frame_pool(struct megasas_instance *instance) 3889static int megasas_create_frame_pool(struct megasas_instance *instance)
3764{ 3890{
3765 int i; 3891 int i;
3766 u32 max_cmd; 3892 u16 max_cmd;
3767 u32 sge_sz; 3893 u32 sge_sz;
3768 u32 total_sz;
3769 u32 frame_count; 3894 u32 frame_count;
3770 struct megasas_cmd *cmd; 3895 struct megasas_cmd *cmd;
3771 3896
@@ -3793,12 +3918,13 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
3793 * Total 192 byte (3 MFI frame of 64 byte) 3918 * Total 192 byte (3 MFI frame of 64 byte)
3794 */ 3919 */
3795 frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1); 3920 frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
3796 total_sz = MEGAMFI_FRAME_SIZE * frame_count; 3921 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
3797 /* 3922 /*
3798 * Use DMA pool facility provided by PCI layer 3923 * Use DMA pool facility provided by PCI layer
3799 */ 3924 */
3800 instance->frame_dma_pool = pci_pool_create("megasas frame pool", 3925 instance->frame_dma_pool = pci_pool_create("megasas frame pool",
3801 instance->pdev, total_sz, 256, 0); 3926 instance->pdev, instance->mfi_frame_size,
3927 256, 0);
3802 3928
3803 if (!instance->frame_dma_pool) { 3929 if (!instance->frame_dma_pool) {
3804 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); 3930 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
@@ -3842,7 +3968,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
3842 return -ENOMEM; 3968 return -ENOMEM;
3843 } 3969 }
3844 3970
3845 memset(cmd->frame, 0, total_sz); 3971 memset(cmd->frame, 0, instance->mfi_frame_size);
3846 cmd->frame->io.context = cpu_to_le32(cmd->index); 3972 cmd->frame->io.context = cpu_to_le32(cmd->index);
3847 cmd->frame->io.pad_0 = 0; 3973 cmd->frame->io.pad_0 = 0;
3848 if (!instance->ctrl_context && reset_devices) 3974 if (!instance->ctrl_context && reset_devices)
@@ -3897,7 +4023,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
3897{ 4023{
3898 int i; 4024 int i;
3899 int j; 4025 int j;
3900 u32 max_cmd; 4026 u16 max_cmd;
3901 struct megasas_cmd *cmd; 4027 struct megasas_cmd *cmd;
3902 struct fusion_context *fusion; 4028 struct fusion_context *fusion;
3903 4029
@@ -3974,18 +4100,22 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
3974 return INITIATE_OCR; 4100 return INITIATE_OCR;
3975} 4101}
3976 4102
3977static int 4103static void
3978megasas_get_pd_info(struct megasas_instance *instance, u16 device_id) 4104megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
3979{ 4105{
3980 int ret; 4106 int ret;
3981 struct megasas_cmd *cmd; 4107 struct megasas_cmd *cmd;
3982 struct megasas_dcmd_frame *dcmd; 4108 struct megasas_dcmd_frame *dcmd;
3983 4109
4110 struct MR_PRIV_DEVICE *mr_device_priv_data;
4111 u16 device_id = 0;
4112
4113 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
3984 cmd = megasas_get_cmd(instance); 4114 cmd = megasas_get_cmd(instance);
3985 4115
3986 if (!cmd) { 4116 if (!cmd) {
3987 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); 4117 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
3988 return -ENOMEM; 4118 return;
3989 } 4119 }
3990 4120
3991 dcmd = &cmd->frame->dcmd; 4121 dcmd = &cmd->frame->dcmd;
@@ -4012,7 +4142,9 @@ megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
4012 4142
4013 switch (ret) { 4143 switch (ret) {
4014 case DCMD_SUCCESS: 4144 case DCMD_SUCCESS:
4015 instance->pd_list[device_id].interface = 4145 mr_device_priv_data = sdev->hostdata;
4146 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4147 mr_device_priv_data->interface_type =
4016 instance->pd_info->state.ddf.pdType.intf; 4148 instance->pd_info->state.ddf.pdType.intf;
4017 break; 4149 break;
4018 4150
@@ -4039,7 +4171,7 @@ megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
4039 if (ret != DCMD_TIMEOUT) 4171 if (ret != DCMD_TIMEOUT)
4040 megasas_return_cmd(instance, cmd); 4172 megasas_return_cmd(instance, cmd);
4041 4173
4042 return ret; 4174 return;
4043} 4175}
4044/* 4176/*
4045 * megasas_get_pd_list_info - Returns FW's pd_list structure 4177 * megasas_get_pd_list_info - Returns FW's pd_list structure
@@ -4418,8 +4550,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4418static void megasas_update_ext_vd_details(struct megasas_instance *instance) 4550static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4419{ 4551{
4420 struct fusion_context *fusion; 4552 struct fusion_context *fusion;
4421 u32 old_map_sz; 4553 u32 ventura_map_sz = 0;
4422 u32 new_map_sz;
4423 4554
4424 fusion = instance->ctrl_context; 4555 fusion = instance->ctrl_context;
4425 /* For MFI based controllers return dummy success */ 4556 /* For MFI based controllers return dummy success */
@@ -4449,21 +4580,27 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4449 instance->supportmax256vd ? "Extended VD(240 VD)firmware" : 4580 instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
4450 "Legacy(64 VD) firmware"); 4581 "Legacy(64 VD) firmware");
4451 4582
4452 old_map_sz = sizeof(struct MR_FW_RAID_MAP) + 4583 if (instance->max_raid_mapsize) {
4453 (sizeof(struct MR_LD_SPAN_MAP) * 4584 ventura_map_sz = instance->max_raid_mapsize *
4454 (instance->fw_supported_vd_count - 1)); 4585 MR_MIN_MAP_SIZE; /* 64k */
4455 new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); 4586 fusion->current_map_sz = ventura_map_sz;
4456 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) + 4587 fusion->max_map_sz = ventura_map_sz;
4457 (sizeof(struct MR_LD_SPAN_MAP) * 4588 } else {
4458 (instance->drv_supported_vd_count - 1)); 4589 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4459 4590 (sizeof(struct MR_LD_SPAN_MAP) *
4460 fusion->max_map_sz = max(old_map_sz, new_map_sz); 4591 (instance->fw_supported_vd_count - 1));
4592 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
4461 4593
4594 fusion->max_map_sz =
4595 max(fusion->old_map_sz, fusion->new_map_sz);
4462 4596
4463 if (instance->supportmax256vd) 4597 if (instance->supportmax256vd)
4464 fusion->current_map_sz = new_map_sz; 4598 fusion->current_map_sz = fusion->new_map_sz;
4465 else 4599 else
4466 fusion->current_map_sz = old_map_sz; 4600 fusion->current_map_sz = fusion->old_map_sz;
4601 }
4602 /* irrespective of FW raid maps, driver raid map is constant */
4603 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4467} 4604}
4468 4605
4469/** 4606/**
@@ -4533,6 +4670,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4533 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); 4670 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4534 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); 4671 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4535 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); 4672 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4673 le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
4536 4674
4537 /* Update the latest Ext VD info. 4675 /* Update the latest Ext VD info.
4538 * From Init path, store current firmware details. 4676 * From Init path, store current firmware details.
@@ -4542,6 +4680,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4542 megasas_update_ext_vd_details(instance); 4680 megasas_update_ext_vd_details(instance);
4543 instance->use_seqnum_jbod_fp = 4681 instance->use_seqnum_jbod_fp =
4544 ctrl_info->adapterOperations3.useSeqNumJbodFP; 4682 ctrl_info->adapterOperations3.useSeqNumJbodFP;
4683 instance->support_morethan256jbod =
4684 ctrl_info->adapter_operations4.support_pd_map_target_id;
4545 4685
4546 /*Check whether controller is iMR or MR */ 4686 /*Check whether controller is iMR or MR */
4547 instance->is_imr = (ctrl_info->memory_size ? 0 : 1); 4687 instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
@@ -4989,13 +5129,13 @@ skip_alloc:
4989static int megasas_init_fw(struct megasas_instance *instance) 5129static int megasas_init_fw(struct megasas_instance *instance)
4990{ 5130{
4991 u32 max_sectors_1; 5131 u32 max_sectors_1;
4992 u32 max_sectors_2; 5132 u32 max_sectors_2, tmp_sectors, msix_enable;
4993 u32 tmp_sectors, msix_enable, scratch_pad_2; 5133 u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
4994 resource_size_t base_addr; 5134 resource_size_t base_addr;
4995 struct megasas_register_set __iomem *reg_set; 5135 struct megasas_register_set __iomem *reg_set;
4996 struct megasas_ctrl_info *ctrl_info = NULL; 5136 struct megasas_ctrl_info *ctrl_info = NULL;
4997 unsigned long bar_list; 5137 unsigned long bar_list;
4998 int i, loop, fw_msix_count = 0; 5138 int i, j, loop, fw_msix_count = 0;
4999 struct IOV_111 *iovPtr; 5139 struct IOV_111 *iovPtr;
5000 struct fusion_context *fusion; 5140 struct fusion_context *fusion;
5001 5141
@@ -5020,34 +5160,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
5020 5160
5021 reg_set = instance->reg_set; 5161 reg_set = instance->reg_set;
5022 5162
5023 switch (instance->pdev->device) { 5163 if (fusion)
5024 case PCI_DEVICE_ID_LSI_FUSION:
5025 case PCI_DEVICE_ID_LSI_PLASMA:
5026 case PCI_DEVICE_ID_LSI_INVADER:
5027 case PCI_DEVICE_ID_LSI_FURY:
5028 case PCI_DEVICE_ID_LSI_INTRUDER:
5029 case PCI_DEVICE_ID_LSI_INTRUDER_24:
5030 case PCI_DEVICE_ID_LSI_CUTLASS_52:
5031 case PCI_DEVICE_ID_LSI_CUTLASS_53:
5032 instance->instancet = &megasas_instance_template_fusion; 5164 instance->instancet = &megasas_instance_template_fusion;
5033 break; 5165 else {
5034 case PCI_DEVICE_ID_LSI_SAS1078R: 5166 switch (instance->pdev->device) {
5035 case PCI_DEVICE_ID_LSI_SAS1078DE: 5167 case PCI_DEVICE_ID_LSI_SAS1078R:
5036 instance->instancet = &megasas_instance_template_ppc; 5168 case PCI_DEVICE_ID_LSI_SAS1078DE:
5037 break; 5169 instance->instancet = &megasas_instance_template_ppc;
5038 case PCI_DEVICE_ID_LSI_SAS1078GEN2: 5170 break;
5039 case PCI_DEVICE_ID_LSI_SAS0079GEN2: 5171 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5040 instance->instancet = &megasas_instance_template_gen2; 5172 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5041 break; 5173 instance->instancet = &megasas_instance_template_gen2;
5042 case PCI_DEVICE_ID_LSI_SAS0073SKINNY: 5174 break;
5043 case PCI_DEVICE_ID_LSI_SAS0071SKINNY: 5175 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5044 instance->instancet = &megasas_instance_template_skinny; 5176 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5045 break; 5177 instance->instancet = &megasas_instance_template_skinny;
5046 case PCI_DEVICE_ID_LSI_SAS1064R: 5178 break;
5047 case PCI_DEVICE_ID_DELL_PERC5: 5179 case PCI_DEVICE_ID_LSI_SAS1064R:
5048 default: 5180 case PCI_DEVICE_ID_DELL_PERC5:
5049 instance->instancet = &megasas_instance_template_xscale; 5181 default:
5050 break; 5182 instance->instancet = &megasas_instance_template_xscale;
5183 instance->pd_list_not_supported = 1;
5184 break;
5185 }
5051 } 5186 }
5052 5187
5053 if (megasas_transition_to_ready(instance, 0)) { 5188 if (megasas_transition_to_ready(instance, 0)) {
@@ -5066,13 +5201,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
5066 goto fail_ready_state; 5201 goto fail_ready_state;
5067 } 5202 }
5068 5203
5069 /* 5204 if (instance->is_ventura) {
5070 * MSI-X host index 0 is common for all adapter. 5205 scratch_pad_3 =
5071 * It is used for all MPT based Adapters. 5206 readl(&instance->reg_set->outbound_scratch_pad_3);
5072 */ 5207 instance->max_raid_mapsize = ((scratch_pad_3 >>
5073 instance->reply_post_host_index_addr[0] = 5208 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5074 (u32 __iomem *)((u8 __iomem *)instance->reg_set + 5209 MR_MAX_RAID_MAP_SIZE_MASK);
5075 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5210 }
5076 5211
5077 /* Check if MSI-X is supported while in ready state */ 5212 /* Check if MSI-X is supported while in ready state */
5078 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 5213 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
@@ -5092,6 +5227,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
5092 instance->msix_vectors = ((scratch_pad_2 5227 instance->msix_vectors = ((scratch_pad_2
5093 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 5228 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5094 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 5229 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5230 if (instance->msix_vectors > 16)
5231 instance->msix_combined = true;
5232
5095 if (rdpq_enable) 5233 if (rdpq_enable)
5096 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 5234 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5097 1 : 0; 5235 1 : 0;
@@ -5125,6 +5263,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
5125 else 5263 else
5126 instance->msix_vectors = 0; 5264 instance->msix_vectors = 0;
5127 } 5265 }
5266 /*
5267 * MSI-X host index 0 is common for all adapters.
5268 * It is used by all MPT-based adapters.
5269 */
5270 if (instance->msix_combined) {
5271 instance->reply_post_host_index_addr[0] =
5272 (u32 *)((u8 *)instance->reg_set +
5273 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
5274 } else {
5275 instance->reply_post_host_index_addr[0] =
5276 (u32 *)((u8 *)instance->reg_set +
5277 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5278 }
5279
5128 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5280 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5129 if (i < 0) 5281 if (i < 0)
5130 goto fail_setup_irqs; 5282 goto fail_setup_irqs;
@@ -5155,6 +5307,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
5155 if (instance->instancet->init_adapter(instance)) 5307 if (instance->instancet->init_adapter(instance))
5156 goto fail_init_adapter; 5308 goto fail_init_adapter;
5157 5309
5310 if (instance->is_ventura) {
5311 scratch_pad_4 =
5312 readl(&instance->reg_set->outbound_scratch_pad_4);
5313 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
5314 MR_DEFAULT_NVME_PAGE_SHIFT)
5315 instance->nvme_page_size =
5316 (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
5317
5318 dev_info(&instance->pdev->dev,
5319 "NVME page size\t: (%d)\n", instance->nvme_page_size);
5320 }
5321
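A worked decode of the scratch pad 4 readout above, assuming a typical value: the low byte holds the page-size shift, and anything below the 4K default leaves nvme_page_size at 0, which later suppresses the MR_DCMD_DRV_GET_TARGET_PROP path in slave_configure:

	shift = scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK;	/* e.g. 12 */
	nvme_page_size = (shift >= MR_DEFAULT_NVME_PAGE_SHIFT)
			? (1U << shift) : 0;		/* 4096 or 0 */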
5158 if (instance->msix_vectors ? 5322 if (instance->msix_vectors ?
5159 megasas_setup_irqs_msix(instance, 1) : 5323 megasas_setup_irqs_msix(instance, 1) :
5160 megasas_setup_irqs_ioapic(instance)) 5324 megasas_setup_irqs_ioapic(instance))
@@ -5173,13 +5337,43 @@ static int megasas_init_fw(struct megasas_instance *instance)
5173 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); 5337 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5174 if (megasas_get_pd_list(instance) < 0) { 5338 if (megasas_get_pd_list(instance) < 0) {
5175 dev_err(&instance->pdev->dev, "failed to get PD list\n"); 5339 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5176 goto fail_get_pd_list; 5340 goto fail_get_ld_pd_list;
5177 } 5341 }
5178 5342
5179 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5343 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5344
5345 /* stream detection initialization */
5346 if (instance->is_ventura && fusion) {
5347 fusion->stream_detect_by_ld =
5348 kzalloc(sizeof(struct LD_STREAM_DETECT *)
5349 * MAX_LOGICAL_DRIVES_EXT,
5350 GFP_KERNEL);
5351 if (!fusion->stream_detect_by_ld) {
5352 dev_err(&instance->pdev->dev,
5353 "unable to allocate stream detection for pool of LDs\n");
5354 goto fail_get_ld_pd_list;
5355 }
5356 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5357 fusion->stream_detect_by_ld[i] =
5358 kmalloc(sizeof(struct LD_STREAM_DETECT),
5359 GFP_KERNEL);
5360 if (!fusion->stream_detect_by_ld[i]) {
5361 dev_err(&instance->pdev->dev,
5362 "unable to allocate stream detect by LD\n ");
5363 for (j = 0; j < i; ++j)
5364 kfree(fusion->stream_detect_by_ld[j]);
5365 kfree(fusion->stream_detect_by_ld);
5366 fusion->stream_detect_by_ld = NULL;
5367 goto fail_get_ld_pd_list;
5368 }
5369 fusion->stream_detect_by_ld[i]->mru_bit_map
5370 = MR_STREAM_BITMAP;
5371 }
5372 }
5373
5180 if (megasas_ld_list_query(instance, 5374 if (megasas_ld_list_query(instance,
5181 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 5375 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5182 megasas_get_ld_list(instance); 5376 goto fail_get_ld_pd_list;
5183 5377
5184 /* 5378 /*
5185 * Compute the max allowed sectors per IO: The controller info has two 5379 * Compute the max allowed sectors per IO: The controller info has two
@@ -5296,7 +5490,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5296 5490
5297 return 0; 5491 return 0;
5298 5492
5299fail_get_pd_list: 5493fail_get_ld_pd_list:
5300 instance->instancet->disable_intr(instance); 5494 instance->instancet->disable_intr(instance);
5301fail_init_adapter: 5495fail_init_adapter:
5302 megasas_destroy_irqs(instance); 5496 megasas_destroy_irqs(instance);
@@ -5309,9 +5503,11 @@ fail_ready_state:
5309 instance->ctrl_info = NULL; 5503 instance->ctrl_info = NULL;
5310 iounmap(instance->reg_set); 5504 iounmap(instance->reg_set);
5311 5505
5312 fail_ioremap: 5506fail_ioremap:
5313 pci_release_selected_regions(instance->pdev, 1<<instance->bar); 5507 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5314 5508
5509 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5510 __func__, __LINE__);
5315 return -EINVAL; 5511 return -EINVAL;
5316} 5512}
5317 5513
@@ -5531,6 +5727,98 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5531 return 0; 5727 return 0;
5532} 5728}
5533 5729
5730/* megasas_get_target_prop - Send a DCMD with the details below to firmware.
5731 *
5732 * This DCMD fetches a few properties of the LD/system PD defined
5733 * in MR_TARGET_DEV_PROPERTIES, e.g. Queue Depth and MDTS value.
5734 *
5735 * The DCMD is sent by the driver whenever a new target is added to the OS.
5736 *
5737 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
5738 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
5739 * 0 = system PD, 1 = LD.
5740 * dcmd.mbox.s[1] - TargetID for LD/system PD.
5741 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
5742 *
5743 * @instance: Adapter soft state
5744 * @sdev: OS provided scsi device
5745 *
5746 * Returns 0 on success non-zero on failure.
5747 */
5748static int
5749megasas_get_target_prop(struct megasas_instance *instance,
5750 struct scsi_device *sdev)
5751{
5752 int ret;
5753 struct megasas_cmd *cmd;
5754 struct megasas_dcmd_frame *dcmd;
5755 u16 targetId = (sdev->channel % 2) + sdev->id;
5756
5757 cmd = megasas_get_cmd(instance);
5758
5759 if (!cmd) {
5760 dev_err(&instance->pdev->dev,
5761 "Failed to get cmd %s\n", __func__);
5762 return -ENOMEM;
5763 }
5764
5765 dcmd = &cmd->frame->dcmd;
5766
5767 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
5768 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5769 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
5770
5771 dcmd->mbox.s[1] = cpu_to_le16(targetId);
5772 dcmd->cmd = MFI_CMD_DCMD;
5773 dcmd->cmd_status = 0xFF;
5774 dcmd->sge_count = 1;
5775 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5776 dcmd->timeout = 0;
5777 dcmd->pad_0 = 0;
5778 dcmd->data_xfer_len =
5779 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5780 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
5781 dcmd->sgl.sge32[0].phys_addr =
5782 cpu_to_le32(instance->tgt_prop_h);
5783 dcmd->sgl.sge32[0].length =
5784 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5785
5786 if (instance->ctrl_context && !instance->mask_interrupts)
5787 ret = megasas_issue_blocked_cmd(instance,
5788 cmd, MFI_IO_TIMEOUT_SECS);
5789 else
5790 ret = megasas_issue_polled(instance, cmd);
5791
5792 switch (ret) {
5793 case DCMD_TIMEOUT:
5794 switch (dcmd_timeout_ocr_possible(instance)) {
5795 case INITIATE_OCR:
5796 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5797 megasas_reset_fusion(instance->host,
5798 MFI_IO_TIMEOUT_OCR);
5799 break;
5800 case KILL_ADAPTER:
5801 megaraid_sas_kill_hba(instance);
5802 break;
5803 case IGNORE_TIMEOUT:
5804 dev_info(&instance->pdev->dev,
5805 "Ignore DCMD timeout: %s %d\n",
5806 __func__, __LINE__);
5807 break;
5808 }
5809 break;
5810
5811 default:
5812 megasas_return_cmd(instance, cmd);
5813 }
5814 if (ret != DCMD_SUCCESS)
5815 dev_err(&instance->pdev->dev,
5816 "return from %s %d return value %d\n",
5817 __func__, __LINE__, ret);
5818
5819 return ret;
5820}
5821
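For context, a sketch of the consumer side of this DCMD (the slave_configure path shown earlier): both fields arrive little-endian in the shared tgt_prop DMA buffer:

	if (megasas_get_target_prop(instance, sdev) == DCMD_SUCCESS) {
		u32 qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
		u32 kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);

		if (instance->nvme_page_size && kb)
			megasas_set_nvme_device_properties(sdev, kb << 10);
		if (qd && qd <= instance->host->can_queue)
			scsi_change_queue_depth(sdev, qd);
	}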
5534/** 5822/**
5535 * megasas_start_aen - Subscribes to AEN during driver load time 5823 * megasas_start_aen - Subscribes to AEN during driver load time
5536 * @instance: Adapter soft state 5824 * @instance: Adapter soft state
@@ -5714,6 +6002,12 @@ static int megasas_probe_one(struct pci_dev *pdev,
5714 instance->pdev = pdev; 6002 instance->pdev = pdev;
5715 6003
5716 switch (instance->pdev->device) { 6004 switch (instance->pdev->device) {
6005 case PCI_DEVICE_ID_LSI_VENTURA:
6006 case PCI_DEVICE_ID_LSI_HARPOON:
6007 case PCI_DEVICE_ID_LSI_TOMCAT:
6008 case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6009 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6010 instance->is_ventura = true;
5717 case PCI_DEVICE_ID_LSI_FUSION: 6011 case PCI_DEVICE_ID_LSI_FUSION:
5718 case PCI_DEVICE_ID_LSI_PLASMA: 6012 case PCI_DEVICE_ID_LSI_PLASMA:
5719 case PCI_DEVICE_ID_LSI_INVADER: 6013 case PCI_DEVICE_ID_LSI_INVADER:
@@ -5723,21 +6017,17 @@ static int megasas_probe_one(struct pci_dev *pdev,
5723 case PCI_DEVICE_ID_LSI_CUTLASS_52: 6017 case PCI_DEVICE_ID_LSI_CUTLASS_52:
5724 case PCI_DEVICE_ID_LSI_CUTLASS_53: 6018 case PCI_DEVICE_ID_LSI_CUTLASS_53:
5725 { 6019 {
5726 instance->ctrl_context_pages = 6020 if (megasas_alloc_fusion_context(instance)) {
5727 get_order(sizeof(struct fusion_context)); 6021 megasas_free_fusion_context(instance);
5728 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
5729 instance->ctrl_context_pages);
5730 if (!instance->ctrl_context) {
5731 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
5732 "memory for Fusion context info\n");
5733 goto fail_alloc_dma_buf; 6022 goto fail_alloc_dma_buf;
5734 } 6023 }
5735 fusion = instance->ctrl_context; 6024 fusion = instance->ctrl_context;
5736 memset(fusion, 0, 6025
5737 ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
5738 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 6026 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
5739 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) 6027 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
5740 fusion->adapter_type = THUNDERBOLT_SERIES; 6028 fusion->adapter_type = THUNDERBOLT_SERIES;
6029 else if (instance->is_ventura)
6030 fusion->adapter_type = VENTURA_SERIES;
5741 else 6031 else
5742 fusion->adapter_type = INVADER_SERIES; 6032 fusion->adapter_type = INVADER_SERIES;
5743 } 6033 }
@@ -5799,9 +6089,17 @@ static int megasas_probe_one(struct pci_dev *pdev,
5799 instance->pd_info = pci_alloc_consistent(pdev, 6089 instance->pd_info = pci_alloc_consistent(pdev,
5800 sizeof(struct MR_PD_INFO), &instance->pd_info_h); 6090 sizeof(struct MR_PD_INFO), &instance->pd_info_h);
5801 6091
6092 instance->pd_info = pci_alloc_consistent(pdev,
6093 sizeof(struct MR_PD_INFO), &instance->pd_info_h);
6094 instance->tgt_prop = pci_alloc_consistent(pdev,
6095 sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
6096
5802 if (!instance->pd_info) 6097 if (!instance->pd_info)
5803 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); 6098 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
5804 6099
6100 if (!instance->tgt_prop)
6101 dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
6102
5805 instance->crash_dump_buf = pci_alloc_consistent(pdev, 6103 instance->crash_dump_buf = pci_alloc_consistent(pdev,
5806 CRASH_DMA_BUF_SIZE, 6104 CRASH_DMA_BUF_SIZE,
5807 &instance->crash_dump_h); 6105 &instance->crash_dump_h);
@@ -5823,6 +6121,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
5823 6121
5824 spin_lock_init(&instance->mfi_pool_lock); 6122 spin_lock_init(&instance->mfi_pool_lock);
5825 spin_lock_init(&instance->hba_lock); 6123 spin_lock_init(&instance->hba_lock);
6124 spin_lock_init(&instance->stream_lock);
5826 spin_lock_init(&instance->completion_lock); 6125 spin_lock_init(&instance->completion_lock);
5827 6126
5828 mutex_init(&instance->reset_mutex); 6127 mutex_init(&instance->reset_mutex);
@@ -5945,6 +6244,10 @@ fail_alloc_dma_buf:
5945 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6244 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
5946 instance->pd_info, 6245 instance->pd_info,
5947 instance->pd_info_h); 6246 instance->pd_info_h);
6247 if (instance->tgt_prop)
6248 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6249 instance->tgt_prop,
6250 instance->tgt_prop_h);
5948 if (instance->producer) 6251 if (instance->producer)
5949 pci_free_consistent(pdev, sizeof(u32), instance->producer, 6252 pci_free_consistent(pdev, sizeof(u32), instance->producer,
5950 instance->producer_h); 6253 instance->producer_h);
@@ -6217,6 +6520,10 @@ fail_init_mfi:
6217 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6520 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6218 instance->pd_info, 6521 instance->pd_info,
6219 instance->pd_info_h); 6522 instance->pd_info_h);
6523 if (instance->tgt_prop)
6524 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6525 instance->tgt_prop,
6526 instance->tgt_prop_h);
6220 if (instance->producer) 6527 if (instance->producer)
6221 pci_free_consistent(pdev, sizeof(u32), instance->producer, 6528 pci_free_consistent(pdev, sizeof(u32), instance->producer,
6222 instance->producer_h); 6529 instance->producer_h);
@@ -6330,6 +6637,14 @@ skip_firing_dcmds:
6330 if (instance->msix_vectors) 6637 if (instance->msix_vectors)
6331 pci_free_irq_vectors(instance->pdev); 6638 pci_free_irq_vectors(instance->pdev);
6332 6639
6640 if (instance->is_ventura) {
6641 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6642 kfree(fusion->stream_detect_by_ld[i]);
6643 kfree(fusion->stream_detect_by_ld);
6644 fusion->stream_detect_by_ld = NULL;
6645 }
 6646

6333 if (instance->ctrl_context) { 6648 if (instance->ctrl_context) {
6334 megasas_release_fusion(instance); 6649 megasas_release_fusion(instance);
6335 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 6650 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
@@ -6350,8 +6665,7 @@ skip_firing_dcmds:
6350 fusion->pd_seq_sync[i], 6665 fusion->pd_seq_sync[i],
6351 fusion->pd_seq_phys[i]); 6666 fusion->pd_seq_phys[i]);
6352 } 6667 }
6353 free_pages((ulong)instance->ctrl_context, 6668 megasas_free_fusion_context(instance);
6354 instance->ctrl_context_pages);
6355 } else { 6669 } else {
6356 megasas_release_mfi(instance); 6670 megasas_release_mfi(instance);
6357 pci_free_consistent(pdev, sizeof(u32), 6671 pci_free_consistent(pdev, sizeof(u32),
@@ -6367,11 +6681,14 @@ skip_firing_dcmds:
6367 if (instance->evt_detail) 6681 if (instance->evt_detail)
6368 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6682 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6369 instance->evt_detail, instance->evt_detail_h); 6683 instance->evt_detail, instance->evt_detail_h);
6370
6371 if (instance->pd_info) 6684 if (instance->pd_info)
6372 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6685 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6373 instance->pd_info, 6686 instance->pd_info,
6374 instance->pd_info_h); 6687 instance->pd_info_h);
6688 if (instance->tgt_prop)
6689 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6690 instance->tgt_prop,
6691 instance->tgt_prop_h);
6375 if (instance->vf_affiliation) 6692 if (instance->vf_affiliation)
6376 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * 6693 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6377 sizeof(struct MR_LD_VF_AFFILIATION), 6694 sizeof(struct MR_LD_VF_AFFILIATION),
@@ -6570,6 +6887,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6570 MFI_FRAME_SGL64 | 6887 MFI_FRAME_SGL64 |
6571 MFI_FRAME_SENSE64)); 6888 MFI_FRAME_SENSE64));
6572 6889
6890 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) {
6891 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
6892 megasas_return_cmd(instance, cmd);
6893 return -1;
6894 }
6895 }
6896
6573 if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) { 6897 if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
6574 error = megasas_set_crash_dump_params_ioctl(cmd); 6898 error = megasas_set_crash_dump_params_ioctl(cmd);
6575 megasas_return_cmd(instance, cmd); 6899 megasas_return_cmd(instance, cmd);
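
The shutdown gate added above is a pre-flight check: before a user-issued shutdown DCMD reaches firmware, the driver refreshes controller info and refuses the command if that query fails. A compressed sketch of the control flow, with a function pointer standing in for megasas_get_ctrl_info():

    /* Return codes and the opcode value mirror the driver's definitions. */
    enum { DCMD_SUCCESS = 0, DCMD_FAILED = 1 };
    #define MR_DCMD_CTRL_SHUTDOWN 0x01050000u

    static int gate_shutdown_dcmd(unsigned int opcode, int (*get_ctrl_info)(void))
    {
        if (opcode == MR_DCMD_CTRL_SHUTDOWN &&
            get_ctrl_info() != DCMD_SUCCESS)
            return -1;   /* return the cmd and bail out, as above */
        return 0;        /* any other opcode passes straight through */
    }
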
@@ -6678,7 +7002,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6678 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + 7002 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
6679 ioc->sense_off); 7003 ioc->sense_off);
6680 7004
6681 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), 7005 if (copy_to_user((void __user *)((unsigned long)
7006 get_unaligned((unsigned long *)sense_ptr)),
6682 sense, ioc->sense_len)) { 7007 sense, ioc->sense_len)) {
6683 dev_err(&instance->pdev->dev, "Failed to copy out to user " 7008 dev_err(&instance->pdev->dev, "Failed to copy out to user "
6684 "sense data\n"); 7009 "sense data\n");
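
The copy_to_user change above exists because the sense-buffer pointer is stored inside the IOCTL frame at an arbitrary byte offset, so dereferencing it directly is an unaligned access on strict-alignment architectures. get_unaligned() hides the safe load; in portable C the same idea is a memcpy, as in this sketch:

    #include <stdint.h>
    #include <string.h>

    /* Read a pointer-sized value stored at an unaligned offset in a frame.
     * memcpy() is the portable equivalent of the kernel's get_unaligned(). */
    static uintptr_t read_unaligned_ptr(const unsigned char *frame, size_t sense_off)
    {
        uintptr_t v;
        memcpy(&v, frame + sense_off, sizeof(v));
        return v;
    }
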
@@ -7047,6 +7372,13 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
7047static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, 7372static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
7048 megasas_sysfs_set_dbg_lvl); 7373 megasas_sysfs_set_dbg_lvl);
7049 7374
7375static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
7376{
7377 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
7378 scsi_remove_device(sdev);
7379 scsi_device_put(sdev);
7380}
7381
7050static void 7382static void
7051megasas_aen_polling(struct work_struct *work) 7383megasas_aen_polling(struct work_struct *work)
7052{ 7384{
@@ -7151,10 +7483,8 @@ megasas_aen_polling(struct work_struct *work)
7151 else 7483 else
7152 scsi_device_put(sdev1); 7484 scsi_device_put(sdev1);
7153 } else { 7485 } else {
7154 if (sdev1) { 7486 if (sdev1)
7155 scsi_remove_device(sdev1); 7487 megasas_remove_scsi_device(sdev1);
7156 scsi_device_put(sdev1);
7157 }
7158 } 7488 }
7159 } 7489 }
7160 } 7490 }
@@ -7171,10 +7501,8 @@ megasas_aen_polling(struct work_struct *work)
7171 else 7501 else
7172 scsi_device_put(sdev1); 7502 scsi_device_put(sdev1);
7173 } else { 7503 } else {
7174 if (sdev1) { 7504 if (sdev1)
7175 scsi_remove_device(sdev1); 7505 megasas_remove_scsi_device(sdev1);
7176 scsi_device_put(sdev1);
7177 }
7178 } 7506 }
7179 } 7507 }
7180 } 7508 }
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index f237d0003df3..62affa76133d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -77,7 +77,6 @@ MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
77#endif 77#endif
78#define TRUE 1 78#define TRUE 1
79 79
80#define SPAN_DEBUG 0
81#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize) 80#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
82#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize) 81#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize)
83#define SPAN_INVALID 0xff 82#define SPAN_INVALID 0xff
@@ -155,12 +154,17 @@ __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
155 return map->raidMap.devHndlInfo[pd].curDevHdl; 154 return map->raidMap.devHndlInfo[pd].curDevHdl;
156} 155}
157 156
157static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
158{
159 return map->raidMap.devHndlInfo[pd].interfaceType;
160}
161
158u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) 162u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
159{ 163{
160 return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); 164 return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
161} 165}
162 166
163u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map) 167u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
164{ 168{
165 return map->raidMap.ldTgtIdToLd[ldTgtId]; 169 return map->raidMap.ldTgtIdToLd[ldTgtId];
166} 170}
@@ -179,18 +183,108 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
179 struct fusion_context *fusion = instance->ctrl_context; 183 struct fusion_context *fusion = instance->ctrl_context;
180 struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; 184 struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
181 struct MR_FW_RAID_MAP *pFwRaidMap = NULL; 185 struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
182 int i; 186 int i, j;
183 u16 ld_count; 187 u16 ld_count;
188 struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
189 struct MR_FW_RAID_MAP_EXT *fw_map_ext;
190 struct MR_RAID_MAP_DESC_TABLE *desc_table;
184 191
185 192
186 struct MR_DRV_RAID_MAP_ALL *drv_map = 193 struct MR_DRV_RAID_MAP_ALL *drv_map =
187 fusion->ld_drv_map[(instance->map_id & 1)]; 194 fusion->ld_drv_map[(instance->map_id & 1)];
188 struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; 195 struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
196 void *raid_map_data = NULL;
197
198 memset(drv_map, 0, fusion->drv_map_sz);
199 memset(pDrvRaidMap->ldTgtIdToLd,
200 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
201
202 if (instance->max_raid_mapsize) {
203 fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
204 desc_table =
205 (struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
206 if (desc_table != fw_map_dyn->raid_map_desc_table)
 207			dev_dbg(&instance->pdev->dev, "desc table offsets do not match: desc %p original %p\n",
 208				desc_table, fw_map_dyn->raid_map_desc_table);
209
210 ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
211 pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
212 pDrvRaidMap->fpPdIoTimeoutSec =
213 fw_map_dyn->fp_pd_io_timeout_sec;
214 pDrvRaidMap->totalSize =
215 cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL));
 216		/* point to the start of the actual map data */
217 raid_map_data = (void *)fw_map_dyn +
218 le32_to_cpu(fw_map_dyn->desc_table_offset) +
219 le32_to_cpu(fw_map_dyn->desc_table_size);
220
221 for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
222 switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
223 case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
224 fw_map_dyn->dev_hndl_info =
225 (struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
226 memcpy(pDrvRaidMap->devHndlInfo,
227 fw_map_dyn->dev_hndl_info,
228 sizeof(struct MR_DEV_HANDLE_INFO) *
229 le32_to_cpu(desc_table->raid_map_desc_elements));
230 break;
231 case RAID_MAP_DESC_TYPE_TGTID_INFO:
232 fw_map_dyn->ld_tgt_id_to_ld =
233 (u16 *)(raid_map_data +
234 le32_to_cpu(desc_table->raid_map_desc_offset));
235 for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
236 pDrvRaidMap->ldTgtIdToLd[j] =
237 le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
238 }
239 break;
240 case RAID_MAP_DESC_TYPE_ARRAY_INFO:
241 fw_map_dyn->ar_map_info =
242 (struct MR_ARRAY_INFO *)
243 (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
244 memcpy(pDrvRaidMap->arMapInfo,
245 fw_map_dyn->ar_map_info,
246 sizeof(struct MR_ARRAY_INFO) *
247 le32_to_cpu(desc_table->raid_map_desc_elements));
248 break;
249 case RAID_MAP_DESC_TYPE_SPAN_INFO:
250 fw_map_dyn->ld_span_map =
251 (struct MR_LD_SPAN_MAP *)
252 (raid_map_data +
253 le32_to_cpu(desc_table->raid_map_desc_offset));
254 memcpy(pDrvRaidMap->ldSpanMap,
255 fw_map_dyn->ld_span_map,
256 sizeof(struct MR_LD_SPAN_MAP) *
257 le32_to_cpu(desc_table->raid_map_desc_elements));
258 break;
259 default:
 260				dev_dbg(&instance->pdev->dev, "wrong number of desc table elements %d\n",
261 fw_map_dyn->desc_table_num_elements);
262 }
263 ++desc_table;
264 }
265
266 } else if (instance->supportmax256vd) {
267 fw_map_ext =
268 (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
269 ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
270 if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
 271			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
272 return;
273 }
274
275 pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
276 pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
277 for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
278 pDrvRaidMap->ldTgtIdToLd[i] =
279 (u16)fw_map_ext->ldTgtIdToLd[i];
280 memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
281 sizeof(struct MR_LD_SPAN_MAP) * ld_count);
282 memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
283 sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
284 memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
285 sizeof(struct MR_DEV_HANDLE_INFO) *
286 MAX_RAIDMAP_PHYSICAL_DEVICES);
189 287
190 if (instance->supportmax256vd) {
191 memcpy(fusion->ld_drv_map[instance->map_id & 1],
192 fusion->ld_map[instance->map_id & 1],
193 fusion->current_map_sz);
194 /* New Raid map will not set totalSize, so keep expected value 288 /* New Raid map will not set totalSize, so keep expected value
195 * for legacy code in ValidateMapInfo 289 * for legacy code in ValidateMapInfo
196 */ 290 */
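
The new max_raid_mapsize branch parses a self-describing map: a header gives the offset, size, and element count of a descriptor table, and each typed descriptor points (offset, count) into the data region that follows the table. A condensed sketch of that walk, with shortened stand-in field names:

    #include <stdint.h>
    #include <stdio.h>

    struct map_hdr  { uint32_t desc_off, desc_sz, desc_num; };
    struct map_desc { uint32_t type, off, elems; };

    enum { DESC_DEVHDL, DESC_TGTID, DESC_ARRAY, DESC_SPAN };

    static void walk_dynamic_map(const void *map)
    {
        const struct map_hdr *h = map;
        const struct map_desc *d =
            (const struct map_desc *)((const char *)map + h->desc_off);
        /* Payloads start immediately after the descriptor table. */
        const char *data = (const char *)map + h->desc_off + h->desc_sz;

        for (uint32_t i = 0; i < h->desc_num; i++, d++) {
            const void *payload = data + d->off;
            switch (d->type) {
            case DESC_DEVHDL: /* memcpy d->elems dev-handle entries */ break;
            case DESC_TGTID:  /* copy target-id -> LD entries one by one */ break;
            case DESC_ARRAY:  /* memcpy d->elems array-info entries */ break;
            case DESC_SPAN:   /* memcpy d->elems LD span maps */ break;
            default:
                fprintf(stderr, "unknown descriptor type %u\n", d->type);
            }
            (void)payload;
        }
    }
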
@@ -201,50 +295,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
201 fusion->ld_map[(instance->map_id & 1)]; 295 fusion->ld_map[(instance->map_id & 1)];
202 pFwRaidMap = &fw_map_old->raidMap; 296 pFwRaidMap = &fw_map_old->raidMap;
203 ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount); 297 ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
204
205#if VD_EXT_DEBUG
206 for (i = 0; i < ld_count; i++) {
207 dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
208 "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
209 instance->unique_id, i,
210 fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
211 fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
212 fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
213 }
214#endif
215
216 memset(drv_map, 0, fusion->drv_map_sz);
217 pDrvRaidMap->totalSize = pFwRaidMap->totalSize; 298 pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
218 pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); 299 pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
219 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; 300 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
220 for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) 301 for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
221 pDrvRaidMap->ldTgtIdToLd[i] = 302 pDrvRaidMap->ldTgtIdToLd[i] =
222 (u8)pFwRaidMap->ldTgtIdToLd[i]; 303 (u8)pFwRaidMap->ldTgtIdToLd[i];
223 for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
224 i < MAX_LOGICAL_DRIVES_EXT; i++)
225 pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
226 for (i = 0; i < ld_count; i++) { 304 for (i = 0; i < ld_count; i++) {
227 pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; 305 pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
228#if VD_EXT_DEBUG
229 dev_dbg(&instance->pdev->dev,
230 "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
231 "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
232 "size 0x%x\n", i, i,
233 pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
234 pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
235 (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
236 dev_dbg(&instance->pdev->dev,
237 "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
238 "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
239 "size 0x%x\n", i, i,
240 pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
241 pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
242 (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
243 dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
244 "raid map %p LD RAID MAP %p/%p\n", drv_map,
245 pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
246 &pDrvRaidMap->ldSpanMap[i].ldRaid);
247#endif
248 } 306 }
249 memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo, 307 memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
250 sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS); 308 sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
@@ -265,7 +323,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
265 struct LD_LOAD_BALANCE_INFO *lbInfo; 323 struct LD_LOAD_BALANCE_INFO *lbInfo;
266 PLD_SPAN_INFO ldSpanInfo; 324 PLD_SPAN_INFO ldSpanInfo;
267 struct MR_LD_RAID *raid; 325 struct MR_LD_RAID *raid;
268 u16 ldCount, num_lds; 326 u16 num_lds, i;
269 u16 ld; 327 u16 ld;
270 u32 expected_size; 328 u32 expected_size;
271 329
@@ -279,7 +337,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
279 lbInfo = fusion->load_balance_info; 337 lbInfo = fusion->load_balance_info;
280 ldSpanInfo = fusion->log_to_span; 338 ldSpanInfo = fusion->log_to_span;
281 339
282 if (instance->supportmax256vd) 340 if (instance->max_raid_mapsize)
341 expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
342 else if (instance->supportmax256vd)
283 expected_size = sizeof(struct MR_FW_RAID_MAP_EXT); 343 expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
284 else 344 else
285 expected_size = 345 expected_size =
@@ -287,8 +347,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
287 (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount))); 347 (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
288 348
289 if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) { 349 if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
290 dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n", 350 dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
291 (unsigned int) expected_size); 351 le32_to_cpu(pDrvRaidMap->totalSize));
352 dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
353 (unsigned int)expected_size);
292 dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n", 354 dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
293 (unsigned int)sizeof(struct MR_LD_SPAN_MAP), 355 (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
294 le32_to_cpu(pDrvRaidMap->totalSize)); 356 le32_to_cpu(pDrvRaidMap->totalSize));
@@ -298,15 +360,23 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
298 if (instance->UnevenSpanSupport) 360 if (instance->UnevenSpanSupport)
299 mr_update_span_set(drv_map, ldSpanInfo); 361 mr_update_span_set(drv_map, ldSpanInfo);
300 362
301 mr_update_load_balance_params(drv_map, lbInfo); 363 if (lbInfo)
364 mr_update_load_balance_params(drv_map, lbInfo);
302 365
303 num_lds = le16_to_cpu(drv_map->raidMap.ldCount); 366 num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
304 367
305 /*Convert Raid capability values to CPU arch */ 368 /*Convert Raid capability values to CPU arch */
306 for (ldCount = 0; ldCount < num_lds; ldCount++) { 369 for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
307 ld = MR_TargetIdToLdGet(ldCount, drv_map); 370 ld = MR_TargetIdToLdGet(i, drv_map);
371
 372		/* For non-existent VDs, skip to the next VD */
 373		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
 374			continue;
375
308 raid = MR_LdRaidGet(ld, drv_map); 376 raid = MR_LdRaidGet(ld, drv_map);
309 le32_to_cpus((u32 *)&raid->capability); 377 le32_to_cpus((u32 *)&raid->capability);
378
379 num_lds--;
310 } 380 }
311 381
312 return 1; 382 return 1;
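
The rewritten capability loop no longer assumes logical drives are dense: it walks every possible target ID, skips slots whose (now 16-bit) LD index is out of range, meaning no VD lives there, and stops once num_lds live drives have been touched. Roughly:

    #include <stdint.h>

    #define MAX_LOGICAL_DRIVES_EXT 256   /* matches the driver's limit */

    static void fixup_ld_capabilities(const uint16_t *tgt_id_to_ld, uint16_t num_lds)
    {
        for (uint16_t i = 0; num_lds > 0 && i < MAX_LOGICAL_DRIVES_EXT; i++) {
            uint16_t ld = tgt_id_to_ld[i];
            if (ld >= MAX_LOGICAL_DRIVES_EXT - 1)
                continue;   /* no VD behind this target id */
            /* le32_to_cpus(&raid[ld].capability) in the driver */
            num_lds--;
        }
    }
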
@@ -348,91 +418,6 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
348/* 418/*
349****************************************************************************** 419******************************************************************************
350* 420*
351* Function to print info about span set created in driver from FW raid map
352*
353* Inputs :
354* map - LD map
355* ldSpanInfo - ldSpanInfo per HBA instance
356*/
357#if SPAN_DEBUG
358static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
359 PLD_SPAN_INFO ldSpanInfo)
360{
361
362 u8 span;
363 u32 element;
364 struct MR_LD_RAID *raid;
365 LD_SPAN_SET *span_set;
366 struct MR_QUAD_ELEMENT *quad;
367 int ldCount;
368 u16 ld;
369
370 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
371 ld = MR_TargetIdToLdGet(ldCount, map);
372 if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
373 continue;
374 raid = MR_LdRaidGet(ld, map);
375 dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
376 ld, raid->spanDepth);
377 for (span = 0; span < raid->spanDepth; span++)
378 dev_dbg(&instance->pdev->dev, "Span=%x,"
379 " number of quads=%x\n", span,
380 le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
381 block_span_info.noElements));
382 for (element = 0; element < MAX_QUAD_DEPTH; element++) {
383 span_set = &(ldSpanInfo[ld].span_set[element]);
384 if (span_set->span_row_data_width == 0)
385 break;
386
387 dev_dbg(&instance->pdev->dev, "Span Set %x:"
388 "width=%x, diff=%x\n", element,
389 (unsigned int)span_set->span_row_data_width,
390 (unsigned int)span_set->diff);
391 dev_dbg(&instance->pdev->dev, "logical LBA"
392 "start=0x%08lx, end=0x%08lx\n",
393 (long unsigned int)span_set->log_start_lba,
394 (long unsigned int)span_set->log_end_lba);
395 dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
396 " end=0x%08lx\n",
397 (long unsigned int)span_set->span_row_start,
398 (long unsigned int)span_set->span_row_end);
399 dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
400 " end=0x%08lx\n",
401 (long unsigned int)span_set->data_row_start,
402 (long unsigned int)span_set->data_row_end);
403 dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
404 " end=0x%08lx\n",
405 (long unsigned int)span_set->data_strip_start,
406 (long unsigned int)span_set->data_strip_end);
407
408 for (span = 0; span < raid->spanDepth; span++) {
409 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
410 block_span_info.noElements) >=
411 element + 1) {
412 quad = &map->raidMap.ldSpanMap[ld].
413 spanBlock[span].block_span_info.
414 quad[element];
415 dev_dbg(&instance->pdev->dev, "Span=%x,"
416 "Quad=%x, diff=%x\n", span,
417 element, le32_to_cpu(quad->diff));
418 dev_dbg(&instance->pdev->dev,
419 "offset_in_span=0x%08lx\n",
420 (long unsigned int)le64_to_cpu(quad->offsetInSpan));
421 dev_dbg(&instance->pdev->dev,
422 "logical start=0x%08lx, end=0x%08lx\n",
423 (long unsigned int)le64_to_cpu(quad->logStart),
424 (long unsigned int)le64_to_cpu(quad->logEnd));
425 }
426 }
427 }
428 }
429 return 0;
430}
431#endif
432
433/*
434******************************************************************************
435*
436* This routine calculates the Span block for given row using spanset. 421* This routine calculates the Span block for given row using spanset.
437* 422*
438* Inputs : 423* Inputs :
@@ -543,19 +528,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
543 else 528 else
544 break; 529 break;
545 } 530 }
546#if SPAN_DEBUG 531
547 dev_info(&instance->pdev->dev, "Strip 0x%llx,"
548 "span_set_Strip 0x%llx, span_set_Row 0x%llx"
549 "data width 0x%llx span offset 0x%x\n", strip,
550 (unsigned long long)span_set_Strip,
551 (unsigned long long)span_set_Row,
552 (unsigned long long)span_set->span_row_data_width,
553 span_offset);
554 dev_info(&instance->pdev->dev, "For strip 0x%llx"
555 "row is 0x%llx\n", strip,
556 (unsigned long long) span_set->data_row_start +
557 (unsigned long long) span_set_Row + (span_offset - 1));
558#endif
559 retval = (span_set->data_row_start + span_set_Row + 532 retval = (span_set->data_row_start + span_set_Row +
560 (span_offset - 1)); 533 (span_offset - 1));
561 return retval; 534 return retval;
@@ -672,11 +645,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
672 else 645 else
673 break; 646 break;
674 } 647 }
675#if SPAN_DEBUG 648
676 dev_info(&instance->pdev->dev, "get_arm_from_strip:"
677 "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
678 (long unsigned int)strip, (strip_offset - span_offset));
679#endif
680 retval = (strip_offset - span_offset); 649 retval = (strip_offset - span_offset);
681 return retval; 650 return retval;
682 } 651 }
@@ -737,16 +706,18 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
737 struct MR_DRV_RAID_MAP_ALL *map) 706 struct MR_DRV_RAID_MAP_ALL *map)
738{ 707{
739 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 708 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
740 u32 pd, arRef; 709 u32 pd, arRef, r1_alt_pd;
741 u8 physArm, span; 710 u8 physArm, span;
742 u64 row; 711 u64 row;
743 u8 retval = TRUE; 712 u8 retval = TRUE;
744 u64 *pdBlock = &io_info->pdBlock; 713 u64 *pdBlock = &io_info->pdBlock;
745 __le16 *pDevHandle = &io_info->devHandle; 714 __le16 *pDevHandle = &io_info->devHandle;
715 u8 *pPdInterface = &io_info->pd_interface;
746 u32 logArm, rowMod, armQ, arm; 716 u32 logArm, rowMod, armQ, arm;
747 struct fusion_context *fusion; 717 struct fusion_context *fusion;
748 718
749 fusion = instance->ctrl_context; 719 fusion = instance->ctrl_context;
720 *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
750 721
751 /*Get row and span from io_info for Uneven Span IO.*/ 722 /*Get row and span from io_info for Uneven Span IO.*/
752 row = io_info->start_row; 723 row = io_info->start_row;
@@ -772,27 +743,46 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
772 arRef = MR_LdSpanArrayGet(ld, span, map); 743 arRef = MR_LdSpanArrayGet(ld, span, map);
773 pd = MR_ArPdGet(arRef, physArm, map); 744 pd = MR_ArPdGet(arRef, physArm, map);
774 745
775 if (pd != MR_PD_INVALID) 746 if (pd != MR_PD_INVALID) {
776 *pDevHandle = MR_PdDevHandleGet(pd, map); 747 *pDevHandle = MR_PdDevHandleGet(pd, map);
777 else { 748 *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
778 *pDevHandle = cpu_to_le16(MR_PD_INVALID); 749 /* get second pd also for raid 1/10 fast path writes*/
750 if (instance->is_ventura &&
751 (raid->level == 1) &&
752 !io_info->isRead) {
753 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
754 if (r1_alt_pd != MR_PD_INVALID)
755 io_info->r1_alt_dev_handle =
756 MR_PdDevHandleGet(r1_alt_pd, map);
757 }
758 } else {
779 if ((raid->level >= 5) && 759 if ((raid->level >= 5) &&
780 ((fusion->adapter_type == THUNDERBOLT_SERIES) || 760 ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
781 ((fusion->adapter_type == INVADER_SERIES) && 761 ((fusion->adapter_type == INVADER_SERIES) &&
782 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) 762 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
783 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; 763 pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
784 else if (raid->level == 1) { 764 else if (raid->level == 1) {
785 physArm = physArm + 1; 765 physArm = physArm + 1;
786 pd = MR_ArPdGet(arRef, physArm, map); 766 pd = MR_ArPdGet(arRef, physArm, map);
787 if (pd != MR_PD_INVALID) 767 if (pd != MR_PD_INVALID) {
788 *pDevHandle = MR_PdDevHandleGet(pd, map); 768 *pDevHandle = MR_PdDevHandleGet(pd, map);
769 *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
770 }
789 } 771 }
790 } 772 }
791 773
792 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); 774 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
793 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | 775 if (instance->is_ventura) {
794 physArm; 776 ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
795 io_info->span_arm = pRAID_Context->spanArm; 777 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
778 io_info->span_arm =
779 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
780 } else {
781 pRAID_Context->span_arm =
782 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
783 io_info->span_arm = pRAID_Context->span_arm;
784 }
785 io_info->pd_after_lb = pd;
796 return retval; 786 return retval;
797} 787}
798 788
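
Two things change in the happy path above: the device handle now defaults to invalid before any lookup, and on a Ventura RAID-1 write the mirror arm's handle is fetched as well so both legs can be issued fast-path. A sketch with function pointers standing in for MR_ArPdGet() and MR_PdDevHandleGet():

    #include <stdbool.h>
    #include <stdint.h>

    #define PD_INVALID     0xFFFFu
    #define HANDLE_INVALID 0xFFFFu

    struct io_req { uint16_t dev_handle, r1_alt_dev_handle; bool is_read; };

    static void pick_dev_handles(struct io_req *io, bool is_ventura, int raid_level,
                                 uint32_t ar_ref, uint8_t phys_arm,
                                 uint32_t (*ar_pd_get)(uint32_t, uint8_t),
                                 uint16_t (*pd_handle_get)(uint32_t))
    {
        uint32_t pd;

        /* New upfront defaults: both handles invalid until proven otherwise. */
        io->dev_handle = HANDLE_INVALID;
        io->r1_alt_dev_handle = HANDLE_INVALID;

        pd = ar_pd_get(ar_ref, phys_arm);
        if (pd == PD_INVALID)
            return;   /* error path (R5 region lock / R1 alternate arm) elided */

        io->dev_handle = pd_handle_get(pd);
        if (is_ventura && raid_level == 1 && !io->is_read) {
            /* Peer arm for RAID 1/10 fast-path writes. */
            uint32_t alt = ar_pd_get(ar_ref, phys_arm + 1);
            if (alt != PD_INVALID)
                io->r1_alt_dev_handle = pd_handle_get(alt);
        }
    }
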
@@ -819,16 +809,17 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
819 struct MR_DRV_RAID_MAP_ALL *map) 809 struct MR_DRV_RAID_MAP_ALL *map)
820{ 810{
821 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 811 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
822 u32 pd, arRef; 812 u32 pd, arRef, r1_alt_pd;
823 u8 physArm, span; 813 u8 physArm, span;
824 u64 row; 814 u64 row;
825 u8 retval = TRUE; 815 u8 retval = TRUE;
826 u64 *pdBlock = &io_info->pdBlock; 816 u64 *pdBlock = &io_info->pdBlock;
827 __le16 *pDevHandle = &io_info->devHandle; 817 __le16 *pDevHandle = &io_info->devHandle;
818 u8 *pPdInterface = &io_info->pd_interface;
828 struct fusion_context *fusion; 819 struct fusion_context *fusion;
829 820
830 fusion = instance->ctrl_context; 821 fusion = instance->ctrl_context;
831 822 *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
832 823
833 row = mega_div64_32(stripRow, raid->rowDataSize); 824 row = mega_div64_32(stripRow, raid->rowDataSize);
834 825
@@ -867,31 +858,49 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
867 arRef = MR_LdSpanArrayGet(ld, span, map); 858 arRef = MR_LdSpanArrayGet(ld, span, map);
868 pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */ 859 pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
869 860
870 if (pd != MR_PD_INVALID) 861 if (pd != MR_PD_INVALID) {
871 /* Get dev handle from Pd. */ 862 /* Get dev handle from Pd. */
872 *pDevHandle = MR_PdDevHandleGet(pd, map); 863 *pDevHandle = MR_PdDevHandleGet(pd, map);
873 else { 864 *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
874 /* set dev handle as invalid. */ 865 /* get second pd also for raid 1/10 fast path writes*/
875 *pDevHandle = cpu_to_le16(MR_PD_INVALID); 866 if (instance->is_ventura &&
867 (raid->level == 1) &&
868 !io_info->isRead) {
869 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
870 if (r1_alt_pd != MR_PD_INVALID)
871 io_info->r1_alt_dev_handle =
872 MR_PdDevHandleGet(r1_alt_pd, map);
873 }
874 } else {
876 if ((raid->level >= 5) && 875 if ((raid->level >= 5) &&
877 ((fusion->adapter_type == THUNDERBOLT_SERIES) || 876 ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
878 ((fusion->adapter_type == INVADER_SERIES) && 877 ((fusion->adapter_type == INVADER_SERIES) &&
879 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) 878 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
880 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; 879 pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
881 else if (raid->level == 1) { 880 else if (raid->level == 1) {
882 /* Get alternate Pd. */ 881 /* Get alternate Pd. */
883 physArm = physArm + 1; 882 physArm = physArm + 1;
884 pd = MR_ArPdGet(arRef, physArm, map); 883 pd = MR_ArPdGet(arRef, physArm, map);
885 if (pd != MR_PD_INVALID) 884 if (pd != MR_PD_INVALID) {
886 /* Get dev handle from Pd */ 885 /* Get dev handle from Pd */
887 *pDevHandle = MR_PdDevHandleGet(pd, map); 886 *pDevHandle = MR_PdDevHandleGet(pd, map);
887 *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
888 }
888 } 889 }
889 } 890 }
890 891
891 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); 892 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
892 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | 893 if (instance->is_ventura) {
893 physArm; 894 ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
894 io_info->span_arm = pRAID_Context->spanArm; 895 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
896 io_info->span_arm =
897 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
898 } else {
899 pRAID_Context->span_arm =
900 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
901 io_info->span_arm = pRAID_Context->span_arm;
902 }
903 io_info->pd_after_lb = pd;
895 return retval; 904 return retval;
896} 905}
897 906
@@ -912,7 +921,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
912{ 921{
913 struct fusion_context *fusion; 922 struct fusion_context *fusion;
914 struct MR_LD_RAID *raid; 923 struct MR_LD_RAID *raid;
915 u32 ld, stripSize, stripe_mask; 924 u32 stripSize, stripe_mask;
916 u64 endLba, endStrip, endRow, start_row, start_strip; 925 u64 endLba, endStrip, endRow, start_row, start_strip;
917 u64 regStart; 926 u64 regStart;
918 u32 regSize; 927 u32 regSize;
@@ -924,6 +933,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
924 u8 retval = 0; 933 u8 retval = 0;
925 u8 startlba_span = SPAN_INVALID; 934 u8 startlba_span = SPAN_INVALID;
926 u64 *pdBlock = &io_info->pdBlock; 935 u64 *pdBlock = &io_info->pdBlock;
936 u16 ld;
927 937
928 ldStartBlock = io_info->ldStartBlock; 938 ldStartBlock = io_info->ldStartBlock;
929 numBlocks = io_info->numBlocks; 939 numBlocks = io_info->numBlocks;
@@ -935,6 +945,8 @@ MR_BuildRaidContext(struct megasas_instance *instance,
935 945
936 ld = MR_TargetIdToLdGet(ldTgtId, map); 946 ld = MR_TargetIdToLdGet(ldTgtId, map);
937 raid = MR_LdRaidGet(ld, map); 947 raid = MR_LdRaidGet(ld, map);
 948	/* check the read-ahead capability bit */
 949	io_info->ra_capable = raid->capability.ra_capable;
938 950
939 /* 951 /*
940 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero 952 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
@@ -996,17 +1008,6 @@ MR_BuildRaidContext(struct megasas_instance *instance,
996 } 1008 }
997 io_info->start_span = startlba_span; 1009 io_info->start_span = startlba_span;
998 io_info->start_row = start_row; 1010 io_info->start_row = start_row;
999#if SPAN_DEBUG
1000 dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
1001 "for row 0x%llx, start strip 0x%llx end strip 0x%llx"
1002 " span 0x%x\n", __func__, __LINE__,
1003 (unsigned long long)start_row,
1004 (unsigned long long)start_strip,
1005 (unsigned long long)endStrip, startlba_span);
1006 dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx"
1007 "Start span 0x%x\n", (unsigned long long)start_row,
1008 (unsigned long long)endRow, startlba_span);
1009#endif
1010 } else { 1011 } else {
1011 start_row = mega_div64_32(start_strip, raid->rowDataSize); 1012 start_row = mega_div64_32(start_strip, raid->rowDataSize);
1012 endRow = mega_div64_32(endStrip, raid->rowDataSize); 1013 endRow = mega_div64_32(endStrip, raid->rowDataSize);
@@ -1093,20 +1094,20 @@ MR_BuildRaidContext(struct megasas_instance *instance,
1093 regSize += stripSize; 1094 regSize += stripSize;
1094 } 1095 }
1095 1096
1096 pRAID_Context->timeoutValue = 1097 pRAID_Context->timeout_value =
1097 cpu_to_le16(raid->fpIoTimeoutForLd ? 1098 cpu_to_le16(raid->fpIoTimeoutForLd ?
1098 raid->fpIoTimeoutForLd : 1099 raid->fpIoTimeoutForLd :
1099 map->raidMap.fpPdIoTimeoutSec); 1100 map->raidMap.fpPdIoTimeoutSec);
1100 if (fusion->adapter_type == INVADER_SERIES) 1101 if (fusion->adapter_type == INVADER_SERIES)
1101 pRAID_Context->regLockFlags = (isRead) ? 1102 pRAID_Context->reg_lock_flags = (isRead) ?
1102 raid->regTypeReqOnRead : raid->regTypeReqOnWrite; 1103 raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
1103 else 1104 else if (!instance->is_ventura)
1104 pRAID_Context->regLockFlags = (isRead) ? 1105 pRAID_Context->reg_lock_flags = (isRead) ?
1105 REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; 1106 REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
1106 pRAID_Context->VirtualDiskTgtId = raid->targetId; 1107 pRAID_Context->virtual_disk_tgt_id = raid->targetId;
1107 pRAID_Context->regLockRowLBA = cpu_to_le64(regStart); 1108 pRAID_Context->reg_lock_row_lba = cpu_to_le64(regStart);
1108 pRAID_Context->regLockLength = cpu_to_le32(regSize); 1109 pRAID_Context->reg_lock_length = cpu_to_le32(regSize);
1109 pRAID_Context->configSeqNum = raid->seqNum; 1110 pRAID_Context->config_seq_num = raid->seqNum;
1110 /* save pointer to raid->LUN array */ 1111 /* save pointer to raid->LUN array */
1111 *raidLUN = raid->LUN; 1112 *raidLUN = raid->LUN;
1112 1113
@@ -1122,7 +1123,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
1122 ref_in_start_stripe, io_info, 1123 ref_in_start_stripe, io_info,
1123 pRAID_Context, map); 1124 pRAID_Context, map);
1124 /* If IO on an invalid Pd, then FP is not possible.*/ 1125 /* If IO on an invalid Pd, then FP is not possible.*/
1125 if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID)) 1126 if (io_info->devHandle == MR_DEVHANDLE_INVALID)
1126 io_info->fpOkForIo = FALSE; 1127 io_info->fpOkForIo = FALSE;
1127 return retval; 1128 return retval;
1128 } else if (isRead) { 1129 } else if (isRead) {
@@ -1140,12 +1141,6 @@ MR_BuildRaidContext(struct megasas_instance *instance,
1140 return TRUE; 1141 return TRUE;
1141 } 1142 }
1142 } 1143 }
1143
1144#if SPAN_DEBUG
1145 /* Just for testing what arm we get for strip.*/
1146 if (io_info->IoforUnevenSpan)
1147 get_arm_from_strip(instance, ld, start_strip, map);
1148#endif
1149 return TRUE; 1144 return TRUE;
1150} 1145}
1151 1146
@@ -1259,10 +1254,6 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
1259 break; 1254 break;
1260 } 1255 }
1261 } 1256 }
1262#if SPAN_DEBUG
1263 getSpanInfo(map, ldSpanInfo);
1264#endif
1265
1266} 1257}
1267 1258
1268void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, 1259void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
@@ -1293,11 +1284,12 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
1293} 1284}
1294 1285
1295u8 megasas_get_best_arm_pd(struct megasas_instance *instance, 1286u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
1296 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) 1287 struct LD_LOAD_BALANCE_INFO *lbInfo,
1288 struct IO_REQUEST_INFO *io_info,
1289 struct MR_DRV_RAID_MAP_ALL *drv_map)
1297{ 1290{
1298 struct fusion_context *fusion;
1299 struct MR_LD_RAID *raid; 1291 struct MR_LD_RAID *raid;
1300 struct MR_DRV_RAID_MAP_ALL *drv_map; 1292 u16 pd1_dev_handle;
1301 u16 pend0, pend1, ld; 1293 u16 pend0, pend1, ld;
1302 u64 diff0, diff1; 1294 u64 diff0, diff1;
1303 u8 bestArm, pd0, pd1, span, arm; 1295 u8 bestArm, pd0, pd1, span, arm;
@@ -1310,9 +1302,6 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
1310 >> RAID_CTX_SPANARM_SPAN_SHIFT); 1302 >> RAID_CTX_SPANARM_SPAN_SHIFT);
1311 arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK); 1303 arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
1312 1304
1313
1314 fusion = instance->ctrl_context;
1315 drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
1316 ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map); 1305 ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
1317 raid = MR_LdRaidGet(ld, drv_map); 1306 raid = MR_LdRaidGet(ld, drv_map);
1318 span_row_size = instance->UnevenSpanSupport ? 1307 span_row_size = instance->UnevenSpanSupport ?
@@ -1323,47 +1312,52 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
1323 pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ? 1312 pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
1324 (arm + 1 - span_row_size) : arm + 1, drv_map); 1313 (arm + 1 - span_row_size) : arm + 1, drv_map);
1325 1314
1326 /* get the pending cmds for the data and mirror arms */ 1315 /* Get PD1 Dev Handle */
1327 pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]); 1316
1328 pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]); 1317 pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);
1329 1318
1330 /* Determine the disk whose head is nearer to the req. block */ 1319 if (pd1_dev_handle == MR_DEVHANDLE_INVALID) {
1331 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]); 1320 bestArm = arm;
1332 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]); 1321 } else {
1333 bestArm = (diff0 <= diff1 ? arm : arm ^ 1); 1322 /* get the pending cmds for the data and mirror arms */
1323 pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
1324 pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
1334 1325
1335 if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) || 1326 /* Determine the disk whose head is nearer to the req. block */
1336 (bestArm != arm && pend1 > pend0 + lb_pending_cmds)) 1327 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
1337 bestArm ^= 1; 1328 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
1329 bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
1330
1331 /* Make balance count from 16 to 4 to
1332 * keep driver in sync with Firmware
1333 */
1334 if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
1335 (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
1336 bestArm ^= 1;
1337
1338 /* Update the last accessed block on the correct pd */
1339 io_info->span_arm =
1340 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
1341 io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
1342 }
1338 1343
1339 /* Update the last accessed block on the correct pd */
1340 io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
1341 lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1; 1344 lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
1342 io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
1343#if SPAN_DEBUG
1344 if (arm != bestArm)
1345 dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
1346 "occur - span 0x%x arm 0x%x bestArm 0x%x "
1347 "io_info->span_arm 0x%x\n",
1348 span, arm, bestArm, io_info->span_arm);
1349#endif
1350 return io_info->pd_after_lb; 1345 return io_info->pd_after_lb;
1351} 1346}
1352 1347
1353__le16 get_updated_dev_handle(struct megasas_instance *instance, 1348__le16 get_updated_dev_handle(struct megasas_instance *instance,
1354 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) 1349 struct LD_LOAD_BALANCE_INFO *lbInfo,
1350 struct IO_REQUEST_INFO *io_info,
1351 struct MR_DRV_RAID_MAP_ALL *drv_map)
1355{ 1352{
1356 u8 arm_pd; 1353 u8 arm_pd;
1357 __le16 devHandle; 1354 __le16 devHandle;
1358 struct fusion_context *fusion;
1359 struct MR_DRV_RAID_MAP_ALL *drv_map;
1360
1361 fusion = instance->ctrl_context;
1362 drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
1363 1355
1364 /* get best new arm (PD ID) */ 1356 /* get best new arm (PD ID) */
1365 arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info); 1357 arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map);
1366 devHandle = MR_PdDevHandleGet(arm_pd, drv_map); 1358 devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
1359 io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
1367 atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]); 1360 atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
1361
1368 return devHandle; 1362 return devHandle;
1369} 1363}
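
megasas_get_best_arm_pd() now also bails out of balancing when the mirror's device handle is invalid, keeping I/O on the current arm. The core heuristic is unchanged: pick the arm whose head is nearest the request, unless it is already ahead of its mirror by more than lb_pending_cmds outstanding commands. Condensed into user-space (last_blk0/1 and pend0/1 are stand-ins for LD_LOAD_BALANCE_INFO state):

    #include <stdlib.h>   /* llabs() */

    /* Returns 0 or 1: which mirror arm should service a read at 'block'. */
    static int best_arm(long long block,
                        long long last_blk0, long long last_blk1,
                        int pend0, int pend1, int lb_pending_cmds)
    {
        long long diff0 = llabs(block - last_blk0);
        long long diff1 = llabs(block - last_blk1);
        int best = (diff0 <= diff1) ? 0 : 1;   /* nearest head wins... */

        /* ...unless that arm is clearly more loaded than its mirror. */
        if ((best == 0 && pend0 > pend1 + lb_pending_cmds) ||
            (best == 1 && pend1 > pend0 + lb_pending_cmds))
            best ^= 1;
        return best;
    }
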
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 24778ba4b6e8..29650ba669da 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -47,6 +47,7 @@
47#include <linux/blkdev.h> 47#include <linux/blkdev.h>
48#include <linux/mutex.h> 48#include <linux/mutex.h>
49#include <linux/poll.h> 49#include <linux/poll.h>
50#include <linux/vmalloc.h>
50 51
51#include <scsi/scsi.h> 52#include <scsi/scsi.h>
52#include <scsi/scsi_cmnd.h> 53#include <scsi/scsi_cmnd.h>
@@ -181,32 +182,44 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
181 struct megasas_cmd_fusion *cmd) 182 struct megasas_cmd_fusion *cmd)
182{ 183{
183 cmd->scmd = NULL; 184 cmd->scmd = NULL;
184 memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); 185 memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
186 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
187 cmd->cmd_completed = false;
185} 188}
186 189
187/** 190/**
188 * megasas_fire_cmd_fusion - Sends command to the FW 191 * megasas_fire_cmd_fusion - Sends command to the FW
192 * @instance: Adapter soft state
193 * @req_desc: 32bit or 64bit Request descriptor
194 *
 195 * Perform a PCI write. Ventura supports a 32 bit descriptor.
 196 * MR controllers prior to Ventura (12G) use a 64 bit descriptor.
189 */ 197 */
198
190static void 199static void
191megasas_fire_cmd_fusion(struct megasas_instance *instance, 200megasas_fire_cmd_fusion(struct megasas_instance *instance,
192 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) 201 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
193{ 202{
203 if (instance->is_ventura)
204 writel(le32_to_cpu(req_desc->u.low),
205 &instance->reg_set->inbound_single_queue_port);
206 else {
194#if defined(writeq) && defined(CONFIG_64BIT) 207#if defined(writeq) && defined(CONFIG_64BIT)
195 u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | 208 u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
196 le32_to_cpu(req_desc->u.low)); 209 le32_to_cpu(req_desc->u.low));
197 210
198 writeq(req_data, &instance->reg_set->inbound_low_queue_port); 211 writeq(req_data, &instance->reg_set->inbound_low_queue_port);
199#else 212#else
200 unsigned long flags; 213 unsigned long flags;
201 214 spin_lock_irqsave(&instance->hba_lock, flags);
202 spin_lock_irqsave(&instance->hba_lock, flags); 215 writel(le32_to_cpu(req_desc->u.low),
203 writel(le32_to_cpu(req_desc->u.low), 216 &instance->reg_set->inbound_low_queue_port);
204 &instance->reg_set->inbound_low_queue_port); 217 writel(le32_to_cpu(req_desc->u.high),
205 writel(le32_to_cpu(req_desc->u.high), 218 &instance->reg_set->inbound_high_queue_port);
206 &instance->reg_set->inbound_high_queue_port); 219 mmiowb();
207 mmiowb(); 220 spin_unlock_irqrestore(&instance->hba_lock, flags);
208 spin_unlock_irqrestore(&instance->hba_lock, flags);
209#endif 221#endif
222 }
210} 223}
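
The rewritten fire routine above encodes three posting strategies. Ventura accepts the whole descriptor through one 32-bit doorbell; older controllers need the 64-bit descriptor posted atomically, either via writeq where available or as a low/high pair under the HBA lock so the halves cannot interleave. As a model, with w32/w64/lock/unlock standing in for writel/writeq and hba_lock (pass w64 as NULL to model a build without writeq):

    #include <stdint.h>

    union req_desc { uint64_t word; struct { uint32_t low, high; } u; };
    enum mmio_reg { SINGLE_QUEUE_PORT, LOW_QUEUE_PORT, HIGH_QUEUE_PORT };

    static void fire_cmd(const union req_desc *d, int is_ventura,
                         void (*w32)(uint32_t, enum mmio_reg),
                         void (*w64)(uint64_t, enum mmio_reg),
                         void (*lock)(void), void (*unlock)(void))
    {
        if (is_ventura) {
            w32(d->u.low, SINGLE_QUEUE_PORT);   /* one 32-bit write */
        } else if (w64) {
            w64(d->word, LOW_QUEUE_PORT);       /* atomic 64-bit write */
        } else {
            lock();                             /* keep the halves paired */
            w32(d->u.low, LOW_QUEUE_PORT);
            w32(d->u.high, HIGH_QUEUE_PORT);
            unlock();
        }
    }
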
211 224
212/** 225/**
@@ -229,7 +242,10 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
229 242
230 reg_set = instance->reg_set; 243 reg_set = instance->reg_set;
231 244
232 cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF; 245 /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
246 if (!instance->is_ventura)
247 cur_max_fw_cmds =
248 readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
233 249
234 if (dual_qdepth_disable || !cur_max_fw_cmds) 250 if (dual_qdepth_disable || !cur_max_fw_cmds)
235 cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; 251 cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
@@ -243,7 +259,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
243 259
244 if (fw_boot_context == OCR_CONTEXT) { 260 if (fw_boot_context == OCR_CONTEXT) {
245 cur_max_fw_cmds = cur_max_fw_cmds - 1; 261 cur_max_fw_cmds = cur_max_fw_cmds - 1;
246 if (cur_max_fw_cmds <= instance->max_fw_cmds) { 262 if (cur_max_fw_cmds < instance->max_fw_cmds) {
247 instance->cur_can_queue = 263 instance->cur_can_queue =
248 cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS + 264 cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
249 MEGASAS_FUSION_IOCTL_CMDS); 265 MEGASAS_FUSION_IOCTL_CMDS);
@@ -255,7 +271,8 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
255 instance->ldio_threshold = ldio_threshold; 271 instance->ldio_threshold = ldio_threshold;
256 272
257 if (!instance->is_rdpq) 273 if (!instance->is_rdpq)
258 instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024); 274 instance->max_fw_cmds =
275 min_t(u16, instance->max_fw_cmds, 1024);
259 276
260 if (reset_devices) 277 if (reset_devices)
261 instance->max_fw_cmds = min(instance->max_fw_cmds, 278 instance->max_fw_cmds = min(instance->max_fw_cmds,
@@ -271,7 +288,14 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
271 (MEGASAS_FUSION_INTERNAL_CMDS + 288 (MEGASAS_FUSION_INTERNAL_CMDS +
272 MEGASAS_FUSION_IOCTL_CMDS); 289 MEGASAS_FUSION_IOCTL_CMDS);
273 instance->cur_can_queue = instance->max_scsi_cmds; 290 instance->cur_can_queue = instance->max_scsi_cmds;
291 instance->host->can_queue = instance->cur_can_queue;
274 } 292 }
293
294 if (instance->is_ventura)
295 instance->max_mpt_cmds =
296 instance->max_fw_cmds * RAID_1_PEER_CMDS;
297 else
298 instance->max_mpt_cmds = instance->max_fw_cmds;
275} 299}
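
The queue math after this hunk: max_fw_cmds still bounds what firmware will accept, but Ventura parts additionally reserve one internal MPT frame per command for the RAID-1 peer write, so the MPT pool is sized at a multiple of it (RAID_1_PEER_CMDS, which this series defines as 2). In short:

    #define RAID_1_PEER_CMDS 2   /* value per this patch set */

    static unsigned int size_mpt_pool(unsigned int max_fw_cmds, int is_ventura)
    {
        /* Extra frames exist only driver-side; FW queue depth is unchanged. */
        return is_ventura ? max_fw_cmds * RAID_1_PEER_CMDS : max_fw_cmds;
    }
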
276/** 300/**
277 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool 301 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
@@ -285,7 +309,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
285 struct megasas_cmd_fusion *cmd; 309 struct megasas_cmd_fusion *cmd;
286 310
287 /* SG, Sense */ 311 /* SG, Sense */
288 for (i = 0; i < instance->max_fw_cmds; i++) { 312 for (i = 0; i < instance->max_mpt_cmds; i++) {
289 cmd = fusion->cmd_list[i]; 313 cmd = fusion->cmd_list[i];
290 if (cmd) { 314 if (cmd) {
291 if (cmd->sg_frame) 315 if (cmd->sg_frame)
@@ -329,7 +353,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
329 353
330 354
331 /* cmd_list */ 355 /* cmd_list */
332 for (i = 0; i < instance->max_fw_cmds; i++) 356 for (i = 0; i < instance->max_mpt_cmds; i++)
333 kfree(fusion->cmd_list[i]); 357 kfree(fusion->cmd_list[i]);
334 358
335 kfree(fusion->cmd_list); 359 kfree(fusion->cmd_list);
@@ -343,7 +367,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
343static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) 367static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
344{ 368{
345 int i; 369 int i;
346 u32 max_cmd; 370 u16 max_cmd;
347 struct fusion_context *fusion; 371 struct fusion_context *fusion;
348 struct megasas_cmd_fusion *cmd; 372 struct megasas_cmd_fusion *cmd;
349 373
@@ -353,7 +377,8 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
353 377
354 fusion->sg_dma_pool = 378 fusion->sg_dma_pool =
355 pci_pool_create("mr_sg", instance->pdev, 379 pci_pool_create("mr_sg", instance->pdev,
356 instance->max_chain_frame_sz, 4, 0); 380 instance->max_chain_frame_sz,
381 MR_DEFAULT_NVME_PAGE_SIZE, 0);
357 /* SCSI_SENSE_BUFFERSIZE = 96 bytes */ 382 /* SCSI_SENSE_BUFFERSIZE = 96 bytes */
358 fusion->sense_dma_pool = 383 fusion->sense_dma_pool =
359 pci_pool_create("mr_sense", instance->pdev, 384 pci_pool_create("mr_sense", instance->pdev,
@@ -381,33 +406,47 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
381 return -ENOMEM; 406 return -ENOMEM;
382 } 407 }
383 } 408 }
409
410 /* create sense buffer for the raid 1/10 fp */
411 for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
412 cmd = fusion->cmd_list[i];
413 cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
414 GFP_KERNEL, &cmd->sense_phys_addr);
415 if (!cmd->sense) {
416 dev_err(&instance->pdev->dev,
417 "Failed from %s %d\n", __func__, __LINE__);
418 return -ENOMEM;
419 }
420 }
421
384 return 0; 422 return 0;
385} 423}
386 424
387int 425int
388megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) 426megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
389{ 427{
390 u32 max_cmd, i; 428 u32 max_mpt_cmd, i;
391 struct fusion_context *fusion; 429 struct fusion_context *fusion;
392 430
393 fusion = instance->ctrl_context; 431 fusion = instance->ctrl_context;
394 432
395 max_cmd = instance->max_fw_cmds; 433 max_mpt_cmd = instance->max_mpt_cmds;
396 434
397 /* 435 /*
398 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. 436 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
399 * Allocate the dynamic array first and then allocate individual 437 * Allocate the dynamic array first and then allocate individual
400 * commands. 438 * commands.
401 */ 439 */
402 fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd, 440 fusion->cmd_list =
403 GFP_KERNEL); 441 kzalloc(sizeof(struct megasas_cmd_fusion *) * max_mpt_cmd,
442 GFP_KERNEL);
404 if (!fusion->cmd_list) { 443 if (!fusion->cmd_list) {
405 dev_err(&instance->pdev->dev, 444 dev_err(&instance->pdev->dev,
406 "Failed from %s %d\n", __func__, __LINE__); 445 "Failed from %s %d\n", __func__, __LINE__);
407 return -ENOMEM; 446 return -ENOMEM;
408 } 447 }
409 448
410 for (i = 0; i < max_cmd; i++) { 449 for (i = 0; i < max_mpt_cmd; i++) {
411 fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion), 450 fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
412 GFP_KERNEL); 451 GFP_KERNEL);
413 if (!fusion->cmd_list[i]) { 452 if (!fusion->cmd_list[i]) {
@@ -539,7 +578,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
539 } 578 }
540 579
541 fusion->rdpq_virt[i].RDPQBaseAddress = 580 fusion->rdpq_virt[i].RDPQBaseAddress =
542 fusion->reply_frames_desc_phys[i]; 581 cpu_to_le64(fusion->reply_frames_desc_phys[i]);
543 582
544 reply_desc = fusion->reply_frames_desc[i]; 583 reply_desc = fusion->reply_frames_desc[i];
545 for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++) 584 for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
@@ -642,13 +681,14 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
642 */ 681 */
643 682
644 /* SMID 0 is reserved. Set SMID/index from 1 */ 683 /* SMID 0 is reserved. Set SMID/index from 1 */
645 for (i = 0; i < instance->max_fw_cmds; i++) { 684 for (i = 0; i < instance->max_mpt_cmds; i++) {
646 cmd = fusion->cmd_list[i]; 685 cmd = fusion->cmd_list[i];
647 offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; 686 offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
648 memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); 687 memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
649 cmd->index = i + 1; 688 cmd->index = i + 1;
650 cmd->scmd = NULL; 689 cmd->scmd = NULL;
651 cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ? 690 cmd->sync_cmd_idx =
691 (i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
652 (i - instance->max_scsi_cmds) : 692 (i - instance->max_scsi_cmds) :
653 (u32)ULONG_MAX; /* Set to Invalid */ 693 (u32)ULONG_MAX; /* Set to Invalid */
654 cmd->instance = instance; 694 cmd->instance = instance;
@@ -658,6 +698,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
658 memset(cmd->io_request, 0, 698 memset(cmd->io_request, 0,
659 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); 699 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
660 cmd->io_request_phys_addr = io_req_base_phys + offset; 700 cmd->io_request_phys_addr = io_req_base_phys + offset;
701 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
661 } 702 }
662 703
663 if (megasas_create_sg_sense_fusion(instance)) 704 if (megasas_create_sg_sense_fusion(instance))
@@ -725,6 +766,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
725 const char *sys_info; 766 const char *sys_info;
726 MFI_CAPABILITIES *drv_ops; 767 MFI_CAPABILITIES *drv_ops;
727 u32 scratch_pad_2; 768 u32 scratch_pad_2;
769 unsigned long flags;
728 770
729 fusion = instance->ctrl_context; 771 fusion = instance->ctrl_context;
730 772
@@ -781,6 +823,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
781 MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0; 823 MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
782 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); 824 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
783 IOCInitMessage->HostMSIxVectors = instance->msix_vectors; 825 IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
826 IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
784 init_frame = (struct megasas_init_frame *)cmd->frame; 827 init_frame = (struct megasas_init_frame *)cmd->frame;
785 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 828 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
786 829
@@ -796,7 +839,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
796 drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations); 839 drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
797 840
798 /* driver support Extended MSIX */ 841 /* driver support Extended MSIX */
799 if (fusion->adapter_type == INVADER_SERIES) 842 if (fusion->adapter_type >= INVADER_SERIES)
800 drv_ops->mfi_capabilities.support_additional_msix = 1; 843 drv_ops->mfi_capabilities.support_additional_msix = 1;
801 /* driver supports HA / Remote LUN over Fast Path interface */ 844 /* driver supports HA / Remote LUN over Fast Path interface */
802 drv_ops->mfi_capabilities.support_fp_remote_lun = 1; 845 drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
@@ -813,6 +856,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
813 drv_ops->mfi_capabilities.support_ext_queue_depth = 1; 856 drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
814 857
815 drv_ops->mfi_capabilities.support_qd_throttling = 1; 858 drv_ops->mfi_capabilities.support_qd_throttling = 1;
859 drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
816 /* Convert capability to LE32 */ 860 /* Convert capability to LE32 */
817 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); 861 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
818 862
@@ -850,7 +894,14 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
850 break; 894 break;
851 } 895 }
852 896
853	megasas_fire_cmd_fusion(instance, &req_desc);			 897	/* For Ventura, IOC INIT also requires a 64 bit descriptor write. */
898 spin_lock_irqsave(&instance->hba_lock, flags);
899 writel(le32_to_cpu(req_desc.u.low),
900 &instance->reg_set->inbound_low_queue_port);
901 writel(le32_to_cpu(req_desc.u.high),
902 &instance->reg_set->inbound_high_queue_port);
903 mmiowb();
904 spin_unlock_irqrestore(&instance->hba_lock, flags);
854 905
855 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); 906 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
856 907
@@ -1009,11 +1060,6 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
1009 1060
1010 memset(ci, 0, fusion->max_map_sz); 1061 memset(ci, 0, fusion->max_map_sz);
1011 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 1062 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1012#if VD_EXT_DEBUG
1013 dev_dbg(&instance->pdev->dev,
1014 "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
1015 __func__, cpu_to_le32(size_map_info));
1016#endif
1017 dcmd->cmd = MFI_CMD_DCMD; 1063 dcmd->cmd = MFI_CMD_DCMD;
1018 dcmd->cmd_status = 0xFF; 1064 dcmd->cmd_status = 0xFF;
1019 dcmd->sge_count = 1; 1065 dcmd->sge_count = 1;
@@ -1065,10 +1111,11 @@ megasas_get_map_info(struct megasas_instance *instance)
1065int 1111int
1066megasas_sync_map_info(struct megasas_instance *instance) 1112megasas_sync_map_info(struct megasas_instance *instance)
1067{ 1113{
1068 int ret = 0, i; 1114 int i;
1069 struct megasas_cmd *cmd; 1115 struct megasas_cmd *cmd;
1070 struct megasas_dcmd_frame *dcmd; 1116 struct megasas_dcmd_frame *dcmd;
1071 u32 size_sync_info, num_lds; 1117 u16 num_lds;
1118 u32 size_sync_info;
1072 struct fusion_context *fusion; 1119 struct fusion_context *fusion;
1073 struct MR_LD_TARGET_SYNC *ci = NULL; 1120 struct MR_LD_TARGET_SYNC *ci = NULL;
1074 struct MR_DRV_RAID_MAP_ALL *map; 1121 struct MR_DRV_RAID_MAP_ALL *map;
@@ -1134,7 +1181,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
1134 1181
1135 instance->instancet->issue_dcmd(instance, cmd); 1182 instance->instancet->issue_dcmd(instance, cmd);
1136 1183
1137 return ret; 1184 return 0;
1138} 1185}
1139 1186
1140/* 1187/*
@@ -1220,7 +1267,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1220{ 1267{
1221 struct megasas_register_set __iomem *reg_set; 1268 struct megasas_register_set __iomem *reg_set;
1222 struct fusion_context *fusion; 1269 struct fusion_context *fusion;
1223 u32 max_cmd, scratch_pad_2; 1270 u16 max_cmd;
1271 u32 scratch_pad_2;
1224 int i = 0, count; 1272 int i = 0, count;
1225 1273
1226 fusion = instance->ctrl_context; 1274 fusion = instance->ctrl_context;
@@ -1230,13 +1278,6 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1230 megasas_fusion_update_can_queue(instance, PROBE_CONTEXT); 1278 megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
1231 1279
1232 /* 1280 /*
1233 * Reduce the max supported cmds by 1. This is to ensure that the
1234 * reply_q_sz (1 more than the max cmd that driver may send)
1235 * does not exceed max cmds that the FW can support
1236 */
1237 instance->max_fw_cmds = instance->max_fw_cmds-1;
1238
1239 /*
1240 * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames 1281 * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
1241 */ 1282 */
1242 instance->max_mfi_cmds = 1283 instance->max_mfi_cmds =
@@ -1247,12 +1288,12 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1247 fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16); 1288 fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
1248 1289
1249 fusion->request_alloc_sz = 1290 fusion->request_alloc_sz =
1250 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd; 1291 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds;
1251 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) 1292 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
1252 *(fusion->reply_q_depth); 1293 *(fusion->reply_q_depth);
1253 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + 1294 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1254 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * 1295 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1255 (max_cmd + 1)); /* Extra 1 for SMID 0 */ 1296 * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
1256 1297
1257 scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2); 1298 scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
1258 /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, 1299 /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
@@ -1302,7 +1343,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1302 fusion->last_reply_idx[i] = 0; 1343 fusion->last_reply_idx[i] = 0;
1303 1344
1304 /* 1345 /*
1305 * For fusion adapters, 3 commands for IOCTL and 5 commands 1346 * For fusion adapters, 3 commands for IOCTL and 8 commands
1306 * for driver's internal DCMDs. 1347 * for driver's internal DCMDs.
1307 */ 1348 */
1308 instance->max_scsi_cmds = instance->max_fw_cmds - 1349 instance->max_scsi_cmds = instance->max_fw_cmds -
@@ -1331,6 +1372,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1331 } 1372 }
1332 1373
1333 instance->flag_ieee = 1; 1374 instance->flag_ieee = 1;
1375 instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT;
1334 fusion->fast_path_io = 0; 1376 fusion->fast_path_io = 0;
1335 1377
1336 fusion->drv_map_pages = get_order(fusion->drv_map_sz); 1378 fusion->drv_map_pages = get_order(fusion->drv_map_sz);
@@ -1388,96 +1430,348 @@ fail_alloc_mfi_cmds:
1388 */ 1430 */
1389 1431
1390void 1432void
1391map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status) 1433map_cmd_status(struct fusion_context *fusion,
1434 struct scsi_cmnd *scmd, u8 status, u8 ext_status,
1435 u32 data_length, u8 *sense)
1392{ 1436{
1437 u8 cmd_type;
1438 int resid;
1393 1439
1440 cmd_type = megasas_cmd_type(scmd);
1394 switch (status) { 1441 switch (status) {
1395 1442
1396 case MFI_STAT_OK: 1443 case MFI_STAT_OK:
1397 cmd->scmd->result = DID_OK << 16; 1444 scmd->result = DID_OK << 16;
1398 break; 1445 break;
1399 1446
1400 case MFI_STAT_SCSI_IO_FAILED: 1447 case MFI_STAT_SCSI_IO_FAILED:
1401 case MFI_STAT_LD_INIT_IN_PROGRESS: 1448 case MFI_STAT_LD_INIT_IN_PROGRESS:
1402 cmd->scmd->result = (DID_ERROR << 16) | ext_status; 1449 scmd->result = (DID_ERROR << 16) | ext_status;
1403 break; 1450 break;
1404 1451
1405 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1452 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1406 1453
1407 cmd->scmd->result = (DID_OK << 16) | ext_status; 1454 scmd->result = (DID_OK << 16) | ext_status;
1408 if (ext_status == SAM_STAT_CHECK_CONDITION) { 1455 if (ext_status == SAM_STAT_CHECK_CONDITION) {
1409 memset(cmd->scmd->sense_buffer, 0, 1456 memset(scmd->sense_buffer, 0,
1410 SCSI_SENSE_BUFFERSIZE); 1457 SCSI_SENSE_BUFFERSIZE);
1411 memcpy(cmd->scmd->sense_buffer, cmd->sense, 1458 memcpy(scmd->sense_buffer, sense,
1412 SCSI_SENSE_BUFFERSIZE); 1459 SCSI_SENSE_BUFFERSIZE);
1413 cmd->scmd->result |= DRIVER_SENSE << 24; 1460 scmd->result |= DRIVER_SENSE << 24;
1414 } 1461 }
1462
1463 /*
1464 * If the IO request is partially completed, then MR FW will
1465 * update "io_request->DataLength" field with actual number of
 1466 * bytes transferred. The driver will then set the residual
 1467 * byte count in the SCSI command structure.
1468 */
1469 resid = (scsi_bufflen(scmd) - data_length);
1470 scsi_set_resid(scmd, resid);
1471
1472 if (resid &&
1473 ((cmd_type == READ_WRITE_LDIO) ||
1474 (cmd_type == READ_WRITE_SYSPDIO)))
1475 scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len"
1476 " requested/completed 0x%x/0x%x\n",
1477 status, scsi_bufflen(scmd), data_length);
1415 break; 1478 break;
1416 1479
1417 case MFI_STAT_LD_OFFLINE: 1480 case MFI_STAT_LD_OFFLINE:
1418 case MFI_STAT_DEVICE_NOT_FOUND: 1481 case MFI_STAT_DEVICE_NOT_FOUND:
1419 cmd->scmd->result = DID_BAD_TARGET << 16; 1482 scmd->result = DID_BAD_TARGET << 16;
1420 break; 1483 break;
1421 case MFI_STAT_CONFIG_SEQ_MISMATCH: 1484 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1422 cmd->scmd->result = DID_IMM_RETRY << 16; 1485 scmd->result = DID_IMM_RETRY << 16;
1423 break; 1486 break;
1424 default: 1487 default:
1425 dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status); 1488 scmd->result = DID_ERROR << 16;
1426 cmd->scmd->result = DID_ERROR << 16;
1427 break; 1489 break;
1428 } 1490 }
1429} 1491}
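
The residual accounting added above is plain subtraction between what the mid-layer asked for and what the firmware reports it actually moved. A minimal userspace sketch of that arithmetic (standalone C with hypothetical names; the real driver reads scsi_bufflen() and the FW-updated DataLength):

#include <stdint.h>
#include <stdio.h>

/* requested: scsi_bufflen() equivalent; transferred: FW-reported
 * DataLength on a partial completion. */
static uint32_t residual_bytes(uint32_t requested, uint32_t transferred)
{
	return requested - transferred;
}

int main(void)
{
	/* 64 KiB requested, firmware moved only 40 KiB */
	printf("resid = %u bytes\n", residual_bytes(64 * 1024, 40 * 1024));
	return 0;
}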
1430 1492
1431/** 1493/**
1494 * megasas_is_prp_possible -
1495 * Checks if native NVMe PRPs can be built for the IO
1496 *
1497 * @instance: Adapter soft state
1498 * @scmd: SCSI command from the mid-layer
1499 * @sge_count: scatter gather element count.
1500 *
1501 * Returns: true: PRPs can be built
1502 * false: IEEE SGLs needs to be built
1503 */
1504static bool
1505megasas_is_prp_possible(struct megasas_instance *instance,
1506 struct scsi_cmnd *scmd, int sge_count)
1507{
1508 struct fusion_context *fusion;
1509 int i;
1510 u32 data_length = 0;
1511 struct scatterlist *sg_scmd;
1512 bool build_prp = false;
1513 u32 mr_nvme_pg_size;
1514
1515 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1516 MR_DEFAULT_NVME_PAGE_SIZE);
1517 fusion = instance->ctrl_context;
1518 data_length = scsi_bufflen(scmd);
1519 sg_scmd = scsi_sglist(scmd);
1520
1521 /*
 1522 * NVMe uses one PRP for each page (or part of a page).
 1523 * Look at the data length: if it is 4 pages or less then IEEE is OK;
 1524 * if it is more than 5 pages then we need to build a native (PRP) SGL;
 1525 * if it is more than 4 and at most 5 pages, then check the 1st SG entry:
 1526 * if its size within the page is >= the residual beyond 4 pages,
 1527 * use IEEE, otherwise use the native SGL.
1528 */
1529
1530 if (data_length > (mr_nvme_pg_size * 5)) {
1531 build_prp = true;
1532 } else if ((data_length > (mr_nvme_pg_size * 4)) &&
1533 (data_length <= (mr_nvme_pg_size * 5))) {
1534 /* check if 1st SG entry size is < residual beyond 4 pages */
1535 if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4)))
1536 build_prp = true;
1537 }
1538
1539/*
 1540 * The code below detects gaps/holes in IO data buffers.
 1541 * What do holes/gaps mean?
 1542 * Any SGE except the first one in an SGL starting at an address that
 1543 * is not NVMe page size aligned, OR any SGE except the last one in an
 1544 * SGL ending off an NVMe page size boundary.
 1545 *
 1546 * The driver has already told the block layer to merge bios only at
 1547 * NVMe page size boundaries by calling the kernel API
 1548 * blk_queue_virt_boundary() inside slave_config.
 1549 * Still, IOs with holes can reach the driver because of
 1550 * IO merging done by the IO scheduler.
 1551 *
 1552 * With SCSI BLK MQ enabled there will be no IOs with holes, as there
 1553 * is no IO scheduling and hence no IO merging.
 1554 *
 1555 * With SCSI BLK MQ disabled, the IO scheduler may attempt to merge
 1556 * IOs and then send IOs with holes.
 1557 *
 1558 * Though the driver can ask the block layer to disable IO merging by
 1559 * calling queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES,
 1560 * sdev->request_queue), the user may tune the sysfs parameter
 1561 * nomerges back to 0 or 1.
 1562 *
 1563 * If IO scheduling is ever enabled with SCSI BLK MQ, this hole
 1564 * detection algorithm will be required for that case as well.
1565 *
1566 *
1567 */
1568 scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
1569 if ((i != 0) && (i != (sge_count - 1))) {
1570 if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
1571 mega_mod64(sg_dma_address(sg_scmd),
1572 mr_nvme_pg_size)) {
1573 build_prp = false;
1574 atomic_inc(&instance->sge_holes_type1);
1575 break;
1576 }
1577 }
1578
1579 if ((sge_count > 1) && (i == 0)) {
1580 if ((mega_mod64((sg_dma_address(sg_scmd) +
1581 sg_dma_len(sg_scmd)),
1582 mr_nvme_pg_size))) {
1583 build_prp = false;
1584 atomic_inc(&instance->sge_holes_type2);
1585 break;
1586 }
1587 }
1588
1589 if ((sge_count > 1) && (i == (sge_count - 1))) {
1590 if (mega_mod64(sg_dma_address(sg_scmd),
1591 mr_nvme_pg_size)) {
1592 build_prp = false;
1593 atomic_inc(&instance->sge_holes_type3);
1594 break;
1595 }
1596 }
1597 }
1598
1599 return build_prp;
1600}
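
To make the 4-vs-5 page decision above concrete, here is a hedged standalone sketch of the same heuristic (userspace C; the 4 KiB page size and the helper name are assumptions, and the real driver also runs the hole-detection loop before committing to PRPs):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_PG 4096u	/* assumed default NVMe page size */

/* > 5 pages: always build PRPs. Between 4 and 5 pages: build PRPs
 * only if the first SG entry cannot absorb the tail beyond page 4. */
static bool prefer_prp(uint32_t data_len, uint32_t first_sge_len)
{
	if (data_len > 5 * NVME_PG)
		return true;
	if (data_len > 4 * NVME_PG &&
	    first_sge_len < data_len - 4 * NVME_PG)
		return true;
	return false;	/* IEEE SGL is fine */
}

int main(void)
{
	printf("%d\n", prefer_prp(6 * NVME_PG, NVME_PG));     /* 1: PRP */
	printf("%d\n", prefer_prp(4 * NVME_PG + 512, 4096));  /* 0: IEEE */
	return 0;
}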
1601
1602/**
1603 * megasas_make_prp_nvme -
 1604 * Prepare PRPs (Physical Region Pages) - SGLs specific to NVMe drives only
1605 *
1606 * @instance: Adapter soft state
1607 * @scmd: SCSI command from the mid-layer
1608 * @sgl_ptr: SGL to be filled in
1609 * @cmd: Fusion command frame
1610 * @sge_count: scatter gather element count.
1611 *
1612 * Returns: true: PRPs are built
 1613 * false: IEEE SGLs need to be built
1614 */
1615static bool
1616megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
1617 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1618 struct megasas_cmd_fusion *cmd, int sge_count)
1619{
1620 int sge_len, offset, num_prp_in_chain = 0;
1621 struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl;
1622 u64 *ptr_sgl;
1623 dma_addr_t ptr_sgl_phys;
1624 u64 sge_addr;
1625 u32 page_mask, page_mask_result;
1626 struct scatterlist *sg_scmd;
1627 u32 first_prp_len;
1628 bool build_prp = false;
1629 int data_len = scsi_bufflen(scmd);
1630 struct fusion_context *fusion;
1631 u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1632 MR_DEFAULT_NVME_PAGE_SIZE);
1633
1634 fusion = instance->ctrl_context;
1635
1636 build_prp = megasas_is_prp_possible(instance, scmd, sge_count);
1637
1638 if (!build_prp)
1639 return false;
1640
1641 /*
 1642 * NVMe has a very convoluted PRP format. One PRP is required
 1643 * for each page or partial page. The driver needs to split up OS sg_list
 1644 * entries if they are longer than one page or cross a page
 1645 * boundary. The driver also has to insert a PRP list pointer entry as
 1646 * the last entry in each physical page of the PRP list.
1647 *
1648 * NOTE: The first PRP "entry" is actually placed in the first
1649 * SGL entry in the main message as IEEE 64 format. The 2nd
1650 * entry in the main message is the chain element, and the rest
1651 * of the PRP entries are built in the contiguous pcie buffer.
1652 */
1653 page_mask = mr_nvme_pg_size - 1;
1654 ptr_sgl = (u64 *)cmd->sg_frame;
1655 ptr_sgl_phys = cmd->sg_frame_phys_addr;
1656 memset(ptr_sgl, 0, instance->max_chain_frame_sz);
1657
 1658 /* Build the chain frame element which holds all PRPs except the first */
1659 main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *)
1660 ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64));
1661
1662 main_chain_element->Address = cpu_to_le64(ptr_sgl_phys);
1663 main_chain_element->NextChainOffset = 0;
1664 main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1665 IEEE_SGE_FLAGS_SYSTEM_ADDR |
1666 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
1667
 1668 /* Build the first PRP; the SGE need not be page aligned */
1669 ptr_first_sgl = sgl_ptr;
1670 sg_scmd = scsi_sglist(scmd);
1671 sge_addr = sg_dma_address(sg_scmd);
1672 sge_len = sg_dma_len(sg_scmd);
1673
1674 offset = (u32)(sge_addr & page_mask);
1675 first_prp_len = mr_nvme_pg_size - offset;
1676
1677 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
1678 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
1679
1680 data_len -= first_prp_len;
1681
1682 if (sge_len > first_prp_len) {
1683 sge_addr += first_prp_len;
1684 sge_len -= first_prp_len;
1685 } else if (sge_len == first_prp_len) {
1686 sg_scmd = sg_next(sg_scmd);
1687 sge_addr = sg_dma_address(sg_scmd);
1688 sge_len = sg_dma_len(sg_scmd);
1689 }
1690
1691 for (;;) {
1692 offset = (u32)(sge_addr & page_mask);
1693
 1694 /* Insert a PRP list pointer entry at the page boundary */
1695 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
1696 if (unlikely(!page_mask_result)) {
1697 scmd_printk(KERN_NOTICE,
1698 scmd, "page boundary ptr_sgl: 0x%p\n",
1699 ptr_sgl);
1700 ptr_sgl_phys += 8;
1701 *ptr_sgl = cpu_to_le64(ptr_sgl_phys);
1702 ptr_sgl++;
1703 num_prp_in_chain++;
1704 }
1705
1706 *ptr_sgl = cpu_to_le64(sge_addr);
1707 ptr_sgl++;
1708 ptr_sgl_phys += 8;
1709 num_prp_in_chain++;
1710
1711 sge_addr += mr_nvme_pg_size;
1712 sge_len -= mr_nvme_pg_size;
1713 data_len -= mr_nvme_pg_size;
1714
1715 if (data_len <= 0)
1716 break;
1717
1718 if (sge_len > 0)
1719 continue;
1720
1721 sg_scmd = sg_next(sg_scmd);
1722 sge_addr = sg_dma_address(sg_scmd);
1723 sge_len = sg_dma_len(sg_scmd);
1724 }
1725
1726 main_chain_element->Length =
1727 cpu_to_le32(num_prp_in_chain * sizeof(u64));
1728
1729 atomic_inc(&instance->prp_sgl);
1730 return build_prp;
1731}
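
The first PRP entry above is the only one allowed to be unaligned; its length just has to reach the next NVMe page boundary. A small self-contained illustration of that offset math (userspace C; the 4 KiB page size is an assumed default, not read from the controller):

#include <stdint.h>
#include <stdio.h>

#define PG 4096ull	/* assumed NVMe page size */

int main(void)
{
	uint64_t sge_addr = 0x10000f00ull;	/* arbitrary DMA address */
	uint64_t page_mask = PG - 1;

	uint32_t offset = (uint32_t)(sge_addr & page_mask);	/* 0xf00 */
	uint32_t first_prp_len = (uint32_t)(PG - offset);	/* 0x100 */

	printf("offset=0x%x first_prp_len=0x%x\n", offset, first_prp_len);
	return 0;
}

Every PRP after the first then advances in whole mr_nvme_pg_size steps, which is why the main loop subtracts a full page from sge_len and data_len on each iteration.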
1732
1733/**
1432 * megasas_make_sgl_fusion - Prepares 32-bit SGL 1734 * megasas_make_sgl_fusion - Prepares 32-bit SGL
1433 * @instance: Adapter soft state 1735 * @instance: Adapter soft state
1434 * @scp: SCSI command from the mid-layer 1736 * @scp: SCSI command from the mid-layer
1435 * @sgl_ptr: SGL to be filled in 1737 * @sgl_ptr: SGL to be filled in
1436 * @cmd: cmd we are working on 1738 * @cmd: cmd we are working on
 1739 * @sge_count: sge count
1437 * 1740 *
1438 * If successful, this function returns the number of SG elements.
1439 */ 1741 */
1440static int 1742static void
1441megasas_make_sgl_fusion(struct megasas_instance *instance, 1743megasas_make_sgl_fusion(struct megasas_instance *instance,
1442 struct scsi_cmnd *scp, 1744 struct scsi_cmnd *scp,
1443 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, 1745 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1444 struct megasas_cmd_fusion *cmd) 1746 struct megasas_cmd_fusion *cmd, int sge_count)
1445{ 1747{
1446 int i, sg_processed, sge_count; 1748 int i, sg_processed;
1447 struct scatterlist *os_sgl; 1749 struct scatterlist *os_sgl;
1448 struct fusion_context *fusion; 1750 struct fusion_context *fusion;
1449 1751
1450 fusion = instance->ctrl_context; 1752 fusion = instance->ctrl_context;
1451 1753
1452 if (fusion->adapter_type == INVADER_SERIES) { 1754 if (fusion->adapter_type >= INVADER_SERIES) {
1453 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; 1755 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
1454 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 1756 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
1455 sgl_ptr_end->Flags = 0; 1757 sgl_ptr_end->Flags = 0;
1456 } 1758 }
1457 1759
1458 sge_count = scsi_dma_map(scp);
1459
1460 BUG_ON(sge_count < 0);
1461
1462 if (sge_count > instance->max_num_sge || !sge_count)
1463 return sge_count;
1464
1465 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1760 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1466 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); 1761 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
1467 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); 1762 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
1468 sgl_ptr->Flags = 0; 1763 sgl_ptr->Flags = 0;
1469 if (fusion->adapter_type == INVADER_SERIES) 1764 if (fusion->adapter_type >= INVADER_SERIES)
1470 if (i == sge_count - 1) 1765 if (i == sge_count - 1)
1471 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; 1766 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1472 sgl_ptr++; 1767 sgl_ptr++;
1473
1474 sg_processed = i + 1; 1768 sg_processed = i + 1;
1475 1769
1476 if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) && 1770 if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
1477 (sge_count > fusion->max_sge_in_main_msg)) { 1771 (sge_count > fusion->max_sge_in_main_msg)) {
1478 1772
1479 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; 1773 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
1480 if (fusion->adapter_type == INVADER_SERIES) { 1774 if (fusion->adapter_type >= INVADER_SERIES) {
1481 if ((le16_to_cpu(cmd->io_request->IoFlags) & 1775 if ((le16_to_cpu(cmd->io_request->IoFlags) &
1482 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != 1776 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1483 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) 1777 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
@@ -1493,7 +1787,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1493 sg_chain = sgl_ptr; 1787 sg_chain = sgl_ptr;
1494 /* Prepare chain element */ 1788 /* Prepare chain element */
1495 sg_chain->NextChainOffset = 0; 1789 sg_chain->NextChainOffset = 0;
1496 if (fusion->adapter_type == INVADER_SERIES) 1790 if (fusion->adapter_type >= INVADER_SERIES)
1497 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; 1791 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1498 else 1792 else
1499 sg_chain->Flags = 1793 sg_chain->Flags =
@@ -1507,6 +1801,45 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1507 memset(sgl_ptr, 0, instance->max_chain_frame_sz); 1801 memset(sgl_ptr, 0, instance->max_chain_frame_sz);
1508 } 1802 }
1509 } 1803 }
1804 atomic_inc(&instance->ieee_sgl);
1805}
1806
1807/**
 1808 * megasas_make_sgl - Build a Scatter Gather List (SGL)
 1809 * @scp: SCSI command pointer
 1810 * @instance: Soft instance of controller
 1811 * @cmd: Fusion command pointer
 1812 *
 1813 * This function builds SGLs based on the device type.
 1814 * For NVMe drives there is a different, native way of building SGLs:
 1815 * PRPs (Physical Region Pages).
 1816 *
 1817 * Returns the number of SG entries actually used, zero if no SG
 1818 * entries were mapped, or -ENOMEM if the mapping failed
1819 */
1820static
1821int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
1822 struct megasas_cmd_fusion *cmd)
1823{
1824 int sge_count;
1825 bool build_prp = false;
1826 struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64;
1827
1828 sge_count = scsi_dma_map(scp);
1829
1830 if ((sge_count > instance->max_num_sge) || (sge_count <= 0))
1831 return sge_count;
1832
1833 sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL;
1834 if ((le16_to_cpu(cmd->io_request->IoFlags) &
1835 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
1836 (cmd->pd_interface == NVME_PD))
1837 build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64,
1838 cmd, sge_count);
1839
1840 if (!build_prp)
1841 megasas_make_sgl_fusion(instance, scp, sgl_chain64,
1842 cmd, sge_count);
1510 1843
1511 return sge_count; 1844 return sge_count;
1512} 1845}
@@ -1525,7 +1858,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1525 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) 1858 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
1526{ 1859{
1527 struct MR_LD_RAID *raid; 1860 struct MR_LD_RAID *raid;
1528 u32 ld; 1861 u16 ld;
1529 u64 start_blk = io_info->pdBlock; 1862 u64 start_blk = io_info->pdBlock;
1530 u8 *cdb = io_request->CDB.CDB32; 1863 u8 *cdb = io_request->CDB.CDB32;
1531 u32 num_blocks = io_info->numBlocks; 1864 u32 num_blocks = io_info->numBlocks;
@@ -1574,6 +1907,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1574 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 1907 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1575 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | 1908 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
1576 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | 1909 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
1910 MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE |
1577 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 1911 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1578 } else { 1912 } else {
1579 io_request->EEDPFlags = cpu_to_le16( 1913 io_request->EEDPFlags = cpu_to_le16(
@@ -1688,6 +2022,166 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1688} 2022}
1689 2023
1690/** 2024/**
 2025 * megasas_stream_detect - stream detection on read and write IOs
2026 * @instance: Adapter soft state
2027 * @cmd: Command to be prepared
2028 * @io_info: IO Request info
2029 *
2030 */
2031
 2032 /** stream detection on read and write IOs */
2033static void megasas_stream_detect(struct megasas_instance *instance,
2034 struct megasas_cmd_fusion *cmd,
2035 struct IO_REQUEST_INFO *io_info)
2036{
2037 struct fusion_context *fusion = instance->ctrl_context;
2038 u32 device_id = io_info->ldTgtId;
2039 struct LD_STREAM_DETECT *current_ld_sd
2040 = fusion->stream_detect_by_ld[device_id];
2041 u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
2042 u32 shifted_values, unshifted_values;
2043 u32 index_value_mask, shifted_values_mask;
2044 int i;
2045 bool is_read_ahead = false;
2046 struct STREAM_DETECT *current_sd;
2047 /* find possible stream */
2048 for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
2049 stream_num = (*track_stream >>
2050 (i * BITS_PER_INDEX_STREAM)) &
2051 STREAM_MASK;
2052 current_sd = &current_ld_sd->stream_track[stream_num];
2053 /* if we found a stream, update the raid
2054 * context and also update the mruBitMap
2055 */
2056 /* boundary condition */
2057 if ((current_sd->next_seq_lba) &&
2058 (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
2059 (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
2060 (current_sd->is_read == io_info->isRead)) {
2061
2062 if ((io_info->ldStartBlock != current_sd->next_seq_lba) &&
2063 ((!io_info->isRead) || (!is_read_ahead)))
2064 /*
 2065 * Once the API is available we need to change this.
 2066 * At this point we are not allowing any gap.
2067 */
2068 continue;
2069
2070 SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
2071 current_sd->next_seq_lba =
2072 io_info->ldStartBlock + io_info->numBlocks;
2073 /*
2074 * update the mruBitMap LRU
2075 */
2076 shifted_values_mask =
2077 (1 << i * BITS_PER_INDEX_STREAM) - 1;
2078 shifted_values = ((*track_stream & shifted_values_mask)
2079 << BITS_PER_INDEX_STREAM);
2080 index_value_mask =
2081 STREAM_MASK << i * BITS_PER_INDEX_STREAM;
2082 unshifted_values =
2083 *track_stream & ~(shifted_values_mask |
2084 index_value_mask);
2085 *track_stream =
2086 unshifted_values | shifted_values | stream_num;
2087 return;
2088 }
2089 }
2090 /*
2091 * if we did not find any stream, create a new one
2092 * from the least recently used
2093 */
2094 stream_num = (*track_stream >>
2095 ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
2096 STREAM_MASK;
2097 current_sd = &current_ld_sd->stream_track[stream_num];
2098 current_sd->is_read = io_info->isRead;
2099 current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
2100 *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
2101 return;
2102}
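
The bit-map juggling in the hit path above is a 4-bits-per-slot LRU list packed into one u32: the matched slot is pulled out, every slot below it shifts up one position, and the matched stream index lands in the most-recently-used slot (the low nibble). A hedged standalone model of that update (userspace C; the 4-bit width mirrors what BITS_PER_INDEX_STREAM appears to be, but both constants are assumptions here):

#include <stdint.h>
#include <stdio.h>

#define BITS 4u		/* assumed BITS_PER_INDEX_STREAM */
#define MASK 0xFu	/* assumed STREAM_MASK */

/* Promote the stream index stored in slot i to the MRU slot (slot 0),
 * shifting every slot below it up by one position. */
static uint32_t promote_to_mru(uint32_t map, unsigned int i)
{
	uint32_t stream = (map >> (i * BITS)) & MASK;
	uint32_t below_mask = (1u << (i * BITS)) - 1;	/* slots 0..i-1 */
	uint32_t shifted = (map & below_mask) << BITS;
	uint32_t slot_mask = MASK << (i * BITS);
	uint32_t untouched = map & ~(below_mask | slot_mask);

	return untouched | shifted | stream;
}

int main(void)
{
	/* slots, LRU (high nibble) to MRU (low nibble): 7 6 5 4 3 2 1 0 */
	uint32_t map = 0x76543210u;

	printf("0x%08x\n", promote_to_mru(map, 3));	/* 0x76542103 */
	return 0;
}

The miss path does the simpler move: it recycles the stream index from the LRU slot and shifts the whole map up one slot, which is what the (*track_stream & ZERO_LAST_STREAM) << 4 expression implements.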
2103
2104/**
2105 * megasas_set_raidflag_cpu_affinity - This function sets the cpu
2106 * affinity (cpu of the controller) and raid_flags in the raid context
2107 * based on IO type.
2108 *
2109 * @praid_context: IO RAID context
2110 * @raid: LD raid map
2111 * @fp_possible: Is fast path possible?
2112 * @is_read: Is read IO?
2113 *
2114 */
2115static void
2116megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
2117 struct MR_LD_RAID *raid, bool fp_possible,
2118 u8 is_read, u32 scsi_buff_len)
2119{
2120 u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
2121 struct RAID_CONTEXT_G35 *rctx_g35;
2122
2123 rctx_g35 = &praid_context->raid_context_g35;
2124 if (fp_possible) {
2125 if (is_read) {
2126 if ((raid->cpuAffinity.pdRead.cpu0) &&
2127 (raid->cpuAffinity.pdRead.cpu1))
2128 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2129 else if (raid->cpuAffinity.pdRead.cpu1)
2130 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2131 } else {
2132 if ((raid->cpuAffinity.pdWrite.cpu0) &&
2133 (raid->cpuAffinity.pdWrite.cpu1))
2134 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2135 else if (raid->cpuAffinity.pdWrite.cpu1)
2136 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2137 /* Fast path cache by pass capable R0/R1 VD */
2138 if ((raid->level <= 1) &&
2139 (raid->capability.fp_cache_bypass_capable)) {
2140 rctx_g35->routing_flags |=
2141 (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT);
2142 rctx_g35->raid_flags =
2143 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
2144 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
2145 }
2146 }
2147 } else {
2148 if (is_read) {
2149 if ((raid->cpuAffinity.ldRead.cpu0) &&
2150 (raid->cpuAffinity.ldRead.cpu1))
2151 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2152 else if (raid->cpuAffinity.ldRead.cpu1)
2153 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2154 } else {
2155 if ((raid->cpuAffinity.ldWrite.cpu0) &&
2156 (raid->cpuAffinity.ldWrite.cpu1))
2157 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2158 else if (raid->cpuAffinity.ldWrite.cpu1)
2159 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2160
2161 if (is_stream_detected(rctx_g35) &&
2162 (raid->level == 5) &&
2163 (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
2164 (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
2165 cpu_sel = MR_RAID_CTX_CPUSEL_0;
2166 }
2167 }
2168
2169 rctx_g35->routing_flags |=
2170 (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
2171
2172 /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2173 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
 2174 * The IO subtype is not a bitmap.
2175 */
2176 if ((raid->level == 1) && (!is_read)) {
2177 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
2178 praid_context->raid_context_g35.raid_flags =
2179 (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2180 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
2181 }
2182}
2183
2184/**
1691 * megasas_build_ldio_fusion - Prepares IOs to devices 2185 * megasas_build_ldio_fusion - Prepares IOs to devices
1692 * @instance: Adapter soft state 2186 * @instance: Adapter soft state
1693 * @scp: SCSI command 2187 * @scp: SCSI command
@@ -1701,29 +2195,36 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1701 struct scsi_cmnd *scp, 2195 struct scsi_cmnd *scp,
1702 struct megasas_cmd_fusion *cmd) 2196 struct megasas_cmd_fusion *cmd)
1703{ 2197{
1704 u8 fp_possible; 2198 bool fp_possible;
2199 u16 ld;
1705 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; 2200 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
2201 u32 scsi_buff_len;
1706 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 2202 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1707 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2203 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1708 struct IO_REQUEST_INFO io_info; 2204 struct IO_REQUEST_INFO io_info;
1709 struct fusion_context *fusion; 2205 struct fusion_context *fusion;
1710 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 2206 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1711 u8 *raidLUN; 2207 u8 *raidLUN;
2208 unsigned long spinlock_flags;
2209 union RAID_CONTEXT_UNION *praid_context;
2210 struct MR_LD_RAID *raid = NULL;
2211 struct MR_PRIV_DEVICE *mrdev_priv;
1712 2212
1713 device_id = MEGASAS_DEV_INDEX(scp); 2213 device_id = MEGASAS_DEV_INDEX(scp);
1714 2214
1715 fusion = instance->ctrl_context; 2215 fusion = instance->ctrl_context;
1716 2216
1717 io_request = cmd->io_request; 2217 io_request = cmd->io_request;
1718 io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); 2218 io_request->RaidContext.raid_context.virtual_disk_tgt_id =
1719 io_request->RaidContext.status = 0; 2219 cpu_to_le16(device_id);
1720 io_request->RaidContext.exStatus = 0; 2220 io_request->RaidContext.raid_context.status = 0;
2221 io_request->RaidContext.raid_context.ex_status = 0;
1721 2222
1722 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; 2223 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
1723 2224
1724 start_lba_lo = 0; 2225 start_lba_lo = 0;
1725 start_lba_hi = 0; 2226 start_lba_hi = 0;
1726 fp_possible = 0; 2227 fp_possible = false;
1727 2228
1728 /* 2229 /*
1729 * 6-byte READ(0x08) or WRITE(0x0A) cdb 2230 * 6-byte READ(0x08) or WRITE(0x0A) cdb
@@ -1779,22 +2280,27 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1779 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; 2280 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
1780 io_info.numBlocks = datalength; 2281 io_info.numBlocks = datalength;
1781 io_info.ldTgtId = device_id; 2282 io_info.ldTgtId = device_id;
1782 io_request->DataLength = cpu_to_le32(scsi_bufflen(scp)); 2283 io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2284 scsi_buff_len = scsi_bufflen(scp);
2285 io_request->DataLength = cpu_to_le32(scsi_buff_len);
1783 2286
1784 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 2287 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1785 io_info.isRead = 1; 2288 io_info.isRead = 1;
1786 2289
1787 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 2290 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2291 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1788 2292
1789 if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= 2293 if (ld < instance->fw_supported_vd_count)
1790 instance->fw_supported_vd_count) || (!fusion->fast_path_io)) { 2294 raid = MR_LdRaidGet(ld, local_map_ptr);
1791 io_request->RaidContext.regLockFlags = 0; 2295
1792 fp_possible = 0; 2296 if (!raid || (!fusion->fast_path_io)) {
2297 io_request->RaidContext.raid_context.reg_lock_flags = 0;
2298 fp_possible = false;
1793 } else { 2299 } else {
1794 if (MR_BuildRaidContext(instance, &io_info, 2300 if (MR_BuildRaidContext(instance, &io_info,
1795 &io_request->RaidContext, 2301 &io_request->RaidContext.raid_context,
1796 local_map_ptr, &raidLUN)) 2302 local_map_ptr, &raidLUN))
1797 fp_possible = io_info.fpOkForIo; 2303 fp_possible = (io_info.fpOkForIo > 0) ? true : false;
1798 } 2304 }
1799 2305
1800 /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU 2306 /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
@@ -1803,6 +2309,54 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1803 cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ? 2309 cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
1804 raw_smp_processor_id() % instance->msix_vectors : 0; 2310 raw_smp_processor_id() % instance->msix_vectors : 0;
1805 2311
2312 praid_context = &io_request->RaidContext;
2313
2314 if (instance->is_ventura) {
2315 spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
2316 megasas_stream_detect(instance, cmd, &io_info);
2317 spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
 2318 /* On Ventura, if a stream is detected for a read and the LD is
 2319 * read ahead capable, issue this IO as an LDIO
 2320 */
2321 if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
2322 io_info.isRead && io_info.ra_capable)
2323 fp_possible = false;
2324
2325 /* FP for Optimal raid level 1.
2326 * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
2327 * are built by the driver as LD I/Os.
2328 * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os
2329 * (there is never a reason to process these as buffered writes)
2330 * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os
2331 * with the SLD bit asserted.
2332 */
2333 if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
2334 mrdev_priv = scp->device->hostdata;
2335
2336 if (atomic_inc_return(&instance->fw_outstanding) >
2337 (instance->host->can_queue)) {
2338 fp_possible = false;
2339 atomic_dec(&instance->fw_outstanding);
2340 } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
2341 atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) {
2342 fp_possible = false;
2343 atomic_dec(&instance->fw_outstanding);
2344 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
2345 atomic_set(&mrdev_priv->r1_ldio_hint,
2346 instance->r1_ldio_hint_default);
2347 }
2348 }
2349
2350 /* If raid is NULL, set CPU affinity to default CPU0 */
2351 if (raid)
2352 megasas_set_raidflag_cpu_affinity(praid_context,
2353 raid, fp_possible, io_info.isRead,
2354 scsi_buff_len);
2355 else
2356 praid_context->raid_context_g35.routing_flags |=
2357 (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
2358 }
2359
1806 if (fp_possible) { 2360 if (fp_possible) {
1807 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, 2361 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
1808 local_map_ptr, start_lba_lo); 2362 local_map_ptr, start_lba_lo);
@@ -1811,29 +2365,52 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1811 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO 2365 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
1812 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2366 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1813 if (fusion->adapter_type == INVADER_SERIES) { 2367 if (fusion->adapter_type == INVADER_SERIES) {
1814 if (io_request->RaidContext.regLockFlags == 2368 if (io_request->RaidContext.raid_context.reg_lock_flags ==
1815 REGION_TYPE_UNUSED) 2369 REGION_TYPE_UNUSED)
1816 cmd->request_desc->SCSIIO.RequestFlags = 2370 cmd->request_desc->SCSIIO.RequestFlags =
1817 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 2371 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1818 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2372 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1819 io_request->RaidContext.Type = MPI2_TYPE_CUDA; 2373 io_request->RaidContext.raid_context.type
1820 io_request->RaidContext.nseg = 0x1; 2374 = MPI2_TYPE_CUDA;
2375 io_request->RaidContext.raid_context.nseg = 0x1;
1821 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2376 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1822 io_request->RaidContext.regLockFlags |= 2377 io_request->RaidContext.raid_context.reg_lock_flags |=
1823 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 2378 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1824 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2379 MR_RL_FLAGS_SEQ_NUM_ENABLE);
2380 } else if (instance->is_ventura) {
2381 io_request->RaidContext.raid_context_g35.nseg_type |=
2382 (1 << RAID_CONTEXT_NSEG_SHIFT);
2383 io_request->RaidContext.raid_context_g35.nseg_type |=
2384 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2385 io_request->RaidContext.raid_context_g35.routing_flags |=
2386 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2387 io_request->IoFlags |=
2388 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1825 } 2389 }
1826 if ((fusion->load_balance_info[device_id].loadBalanceFlag) && 2390 if (fusion->load_balance_info &&
1827 (io_info.isRead)) { 2391 (fusion->load_balance_info[device_id].loadBalanceFlag) &&
2392 (io_info.isRead)) {
1828 io_info.devHandle = 2393 io_info.devHandle =
1829 get_updated_dev_handle(instance, 2394 get_updated_dev_handle(instance,
1830 &fusion->load_balance_info[device_id], 2395 &fusion->load_balance_info[device_id],
1831 &io_info); 2396 &io_info, local_map_ptr);
1832 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; 2397 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
1833 cmd->pd_r1_lb = io_info.pd_after_lb; 2398 cmd->pd_r1_lb = io_info.pd_after_lb;
2399 if (instance->is_ventura)
2400 io_request->RaidContext.raid_context_g35.span_arm
2401 = io_info.span_arm;
2402 else
2403 io_request->RaidContext.raid_context.span_arm
2404 = io_info.span_arm;
2405
1834 } else 2406 } else
1835 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 2407 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
1836 2408
2409 if (instance->is_ventura)
2410 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
2411 else
2412 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2413
1837 if ((raidLUN[0] == 1) && 2414 if ((raidLUN[0] == 1) &&
1838 (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) { 2415 (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
1839 instance->dev_handle = !(instance->dev_handle); 2416 instance->dev_handle = !(instance->dev_handle);
@@ -1843,28 +2420,39 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1843 2420
1844 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 2421 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1845 io_request->DevHandle = io_info.devHandle; 2422 io_request->DevHandle = io_info.devHandle;
2423 cmd->pd_interface = io_info.pd_interface;
1846 /* populate the LUN field */ 2424 /* populate the LUN field */
1847 memcpy(io_request->LUN, raidLUN, 8); 2425 memcpy(io_request->LUN, raidLUN, 8);
1848 } else { 2426 } else {
1849 io_request->RaidContext.timeoutValue = 2427 io_request->RaidContext.raid_context.timeout_value =
1850 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); 2428 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1851 cmd->request_desc->SCSIIO.RequestFlags = 2429 cmd->request_desc->SCSIIO.RequestFlags =
1852 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 2430 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
1853 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2431 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1854 if (fusion->adapter_type == INVADER_SERIES) { 2432 if (fusion->adapter_type == INVADER_SERIES) {
1855 if (io_info.do_fp_rlbypass || 2433 if (io_info.do_fp_rlbypass ||
1856 (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)) 2434 (io_request->RaidContext.raid_context.reg_lock_flags
2435 == REGION_TYPE_UNUSED))
1857 cmd->request_desc->SCSIIO.RequestFlags = 2436 cmd->request_desc->SCSIIO.RequestFlags =
1858 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 2437 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1859 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2438 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1860 io_request->RaidContext.Type = MPI2_TYPE_CUDA; 2439 io_request->RaidContext.raid_context.type
1861 io_request->RaidContext.regLockFlags |= 2440 = MPI2_TYPE_CUDA;
2441 io_request->RaidContext.raid_context.reg_lock_flags |=
1862 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | 2442 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1863 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2443 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1864 io_request->RaidContext.nseg = 0x1; 2444 io_request->RaidContext.raid_context.nseg = 0x1;
2445 } else if (instance->is_ventura) {
2446 io_request->RaidContext.raid_context_g35.routing_flags |=
2447 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2448 io_request->RaidContext.raid_context_g35.nseg_type |=
2449 (1 << RAID_CONTEXT_NSEG_SHIFT);
2450 io_request->RaidContext.raid_context_g35.nseg_type |=
2451 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
1865 } 2452 }
1866 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 2453 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1867 io_request->DevHandle = cpu_to_le16(device_id); 2454 io_request->DevHandle = cpu_to_le16(device_id);
2455
1868 } /* Not FP */ 2456 } /* Not FP */
1869} 2457}
1870 2458
@@ -1881,27 +2469,26 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1881{ 2469{
1882 u32 device_id; 2470 u32 device_id;
1883 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 2471 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1884 u16 pd_index = 0; 2472 u16 ld;
1885 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 2473 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1886 struct fusion_context *fusion = instance->ctrl_context; 2474 struct fusion_context *fusion = instance->ctrl_context;
1887 u8 span, physArm; 2475 u8 span, physArm;
1888 __le16 devHandle; 2476 __le16 devHandle;
1889 u32 ld, arRef, pd; 2477 u32 arRef, pd;
1890 struct MR_LD_RAID *raid; 2478 struct MR_LD_RAID *raid;
1891 struct RAID_CONTEXT *pRAID_Context; 2479 struct RAID_CONTEXT *pRAID_Context;
1892 u8 fp_possible = 1; 2480 u8 fp_possible = 1;
1893 2481
1894 io_request = cmd->io_request; 2482 io_request = cmd->io_request;
1895 device_id = MEGASAS_DEV_INDEX(scmd); 2483 device_id = MEGASAS_DEV_INDEX(scmd);
1896 pd_index = MEGASAS_PD_INDEX(scmd);
1897 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 2484 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1898 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 2485 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1899 /* get RAID_Context pointer */ 2486 /* get RAID_Context pointer */
1900 pRAID_Context = &io_request->RaidContext; 2487 pRAID_Context = &io_request->RaidContext.raid_context;
1901 /* Check with FW team */ 2488 /* Check with FW team */
1902 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 2489 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
1903 pRAID_Context->regLockRowLBA = 0; 2490 pRAID_Context->reg_lock_row_lba = 0;
1904 pRAID_Context->regLockLength = 0; 2491 pRAID_Context->reg_lock_length = 0;
1905 2492
1906 if (fusion->fast_path_io && ( 2493 if (fusion->fast_path_io && (
1907 device_id < instance->fw_supported_vd_count)) { 2494 device_id < instance->fw_supported_vd_count)) {
@@ -1909,10 +2496,11 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1909 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 2496 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1910 if (ld >= instance->fw_supported_vd_count) 2497 if (ld >= instance->fw_supported_vd_count)
1911 fp_possible = 0; 2498 fp_possible = 0;
1912 2499 else {
1913 raid = MR_LdRaidGet(ld, local_map_ptr); 2500 raid = MR_LdRaidGet(ld, local_map_ptr);
1914 if (!(raid->capability.fpNonRWCapable)) 2501 if (!(raid->capability.fpNonRWCapable))
1915 fp_possible = 0; 2502 fp_possible = 0;
2503 }
1916 } else 2504 } else
1917 fp_possible = 0; 2505 fp_possible = 0;
1918 2506
@@ -1920,7 +2508,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1920 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 2508 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1921 io_request->DevHandle = cpu_to_le16(device_id); 2509 io_request->DevHandle = cpu_to_le16(device_id);
1922 io_request->LUN[1] = scmd->device->lun; 2510 io_request->LUN[1] = scmd->device->lun;
1923 pRAID_Context->timeoutValue = 2511 pRAID_Context->timeout_value =
1924 cpu_to_le16 (scmd->request->timeout / HZ); 2512 cpu_to_le16 (scmd->request->timeout / HZ);
1925 cmd->request_desc->SCSIIO.RequestFlags = 2513 cmd->request_desc->SCSIIO.RequestFlags =
1926 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 2514 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
@@ -1928,9 +2516,11 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1928 } else { 2516 } else {
1929 2517
1930 /* set RAID context values */ 2518 /* set RAID context values */
1931 pRAID_Context->configSeqNum = raid->seqNum; 2519 pRAID_Context->config_seq_num = raid->seqNum;
1932 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; 2520 if (!instance->is_ventura)
1933 pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd); 2521 pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
2522 pRAID_Context->timeout_value =
2523 cpu_to_le16(raid->fpIoTimeoutForLd);
1934 2524
1935 /* get the DevHandle for the PD (since this is 2525 /* get the DevHandle for the PD (since this is
1936 fpNonRWCapable, this is a single disk RAID0) */ 2526 fpNonRWCapable, this is a single disk RAID0) */
@@ -1965,7 +2555,8 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1965 */ 2555 */
1966static void 2556static void
1967megasas_build_syspd_fusion(struct megasas_instance *instance, 2557megasas_build_syspd_fusion(struct megasas_instance *instance,
1968 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible) 2558 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
2559 bool fp_possible)
1969{ 2560{
1970 u32 device_id; 2561 u32 device_id;
1971 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 2562 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
@@ -1975,22 +2566,25 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
1975 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 2566 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1976 struct RAID_CONTEXT *pRAID_Context; 2567 struct RAID_CONTEXT *pRAID_Context;
1977 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 2568 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
2569 struct MR_PRIV_DEVICE *mr_device_priv_data;
1978 struct fusion_context *fusion = instance->ctrl_context; 2570 struct fusion_context *fusion = instance->ctrl_context;
1979 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1]; 2571 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
1980 2572
1981 device_id = MEGASAS_DEV_INDEX(scmd); 2573 device_id = MEGASAS_DEV_INDEX(scmd);
1982 pd_index = MEGASAS_PD_INDEX(scmd); 2574 pd_index = MEGASAS_PD_INDEX(scmd);
1983 os_timeout_value = scmd->request->timeout / HZ; 2575 os_timeout_value = scmd->request->timeout / HZ;
2576 mr_device_priv_data = scmd->device->hostdata;
2577 cmd->pd_interface = mr_device_priv_data->interface_type;
1984 2578
1985 io_request = cmd->io_request; 2579 io_request = cmd->io_request;
1986 /* get RAID_Context pointer */ 2580 /* get RAID_Context pointer */
1987 pRAID_Context = &io_request->RaidContext; 2581 pRAID_Context = &io_request->RaidContext.raid_context;
1988 pRAID_Context->regLockFlags = 0; 2582 pRAID_Context->reg_lock_flags = 0;
1989 pRAID_Context->regLockRowLBA = 0; 2583 pRAID_Context->reg_lock_row_lba = 0;
1990 pRAID_Context->regLockLength = 0; 2584 pRAID_Context->reg_lock_length = 0;
1991 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 2585 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1992 io_request->LUN[1] = scmd->device->lun; 2586 io_request->LUN[1] = scmd->device->lun;
1993 pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD 2587 pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1994 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; 2588 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1995 2589
1996 /* If FW supports PD sequence number */ 2590 /* If FW supports PD sequence number */
@@ -1999,24 +2593,38 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
1999 /* TgtId must be incremented by 255 as jbod seq number is index 2593 /* TgtId must be incremented by 255 as jbod seq number is index
2000 * below raid map 2594 * below raid map
2001 */ 2595 */
2002 pRAID_Context->VirtualDiskTgtId = 2596 /* More than 256 PD/JBOD support for Ventura */
2003 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); 2597 if (instance->support_morethan256jbod)
2004 pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum; 2598 pRAID_Context->virtual_disk_tgt_id =
2599 pd_sync->seq[pd_index].pd_target_id;
2600 else
2601 pRAID_Context->virtual_disk_tgt_id =
2602 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
2603 pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
2005 io_request->DevHandle = pd_sync->seq[pd_index].devHandle; 2604 io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
2006 pRAID_Context->regLockFlags |= 2605 if (instance->is_ventura) {
2007 (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA); 2606 io_request->RaidContext.raid_context_g35.routing_flags |=
2008 pRAID_Context->Type = MPI2_TYPE_CUDA; 2607 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2009 pRAID_Context->nseg = 0x1; 2608 io_request->RaidContext.raid_context_g35.nseg_type |=
2609 (1 << RAID_CONTEXT_NSEG_SHIFT);
2610 io_request->RaidContext.raid_context_g35.nseg_type |=
2611 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2612 } else {
2613 pRAID_Context->type = MPI2_TYPE_CUDA;
2614 pRAID_Context->nseg = 0x1;
2615 pRAID_Context->reg_lock_flags |=
2616 (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
2617 }
2010 } else if (fusion->fast_path_io) { 2618 } else if (fusion->fast_path_io) {
2011 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 2619 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
2012 pRAID_Context->configSeqNum = 0; 2620 pRAID_Context->config_seq_num = 0;
2013 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 2621 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2014 io_request->DevHandle = 2622 io_request->DevHandle =
2015 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 2623 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
2016 } else { 2624 } else {
2017 /* Want to send all IO via FW path */ 2625 /* Want to send all IO via FW path */
2018 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 2626 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
2019 pRAID_Context->configSeqNum = 0; 2627 pRAID_Context->config_seq_num = 0;
2020 io_request->DevHandle = cpu_to_le16(0xFFFF); 2628 io_request->DevHandle = cpu_to_le16(0xFFFF);
2021 } 2629 }
2022 2630
@@ -2032,17 +2640,17 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
2032 cmd->request_desc->SCSIIO.RequestFlags = 2640 cmd->request_desc->SCSIIO.RequestFlags =
2033 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 2641 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2034 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2642 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2035 pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value); 2643 pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
2036 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 2644 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
2037 } else { 2645 } else {
2038 /* system pd Fast Path */ 2646 /* system pd Fast Path */
2039 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2647 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2040 timeout_limit = (scmd->device->type == TYPE_DISK) ? 2648 timeout_limit = (scmd->device->type == TYPE_DISK) ?
2041 255 : 0xFFFF; 2649 255 : 0xFFFF;
2042 pRAID_Context->timeoutValue = 2650 pRAID_Context->timeout_value =
2043 cpu_to_le16((os_timeout_value > timeout_limit) ? 2651 cpu_to_le16((os_timeout_value > timeout_limit) ?
2044 timeout_limit : os_timeout_value); 2652 timeout_limit : os_timeout_value);
2045 if (fusion->adapter_type == INVADER_SERIES) 2653 if (fusion->adapter_type >= INVADER_SERIES)
2046 io_request->IoFlags |= 2654 io_request->IoFlags |=
2047 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2655 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2048 2656
@@ -2066,9 +2674,11 @@ megasas_build_io_fusion(struct megasas_instance *instance,
2066 struct scsi_cmnd *scp, 2674 struct scsi_cmnd *scp,
2067 struct megasas_cmd_fusion *cmd) 2675 struct megasas_cmd_fusion *cmd)
2068{ 2676{
2069 u16 sge_count; 2677 int sge_count;
2070 u8 cmd_type; 2678 u8 cmd_type;
2071 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; 2679 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
2680 struct MR_PRIV_DEVICE *mr_device_priv_data;
2681 mr_device_priv_data = scp->device->hostdata;
2072 2682
2073 /* Zero out some fields so they don't get reused */ 2683 /* Zero out some fields so they don't get reused */
2074 memset(io_request->LUN, 0x0, 8); 2684 memset(io_request->LUN, 0x0, 8);
@@ -2078,9 +2688,9 @@ megasas_build_io_fusion(struct megasas_instance *instance,
2078 io_request->Control = 0; 2688 io_request->Control = 0;
2079 io_request->EEDPBlockSize = 0; 2689 io_request->EEDPBlockSize = 0;
2080 io_request->ChainOffset = 0; 2690 io_request->ChainOffset = 0;
2081 io_request->RaidContext.RAIDFlags = 0; 2691 io_request->RaidContext.raid_context.raid_flags = 0;
2082 io_request->RaidContext.Type = 0; 2692 io_request->RaidContext.raid_context.type = 0;
2083 io_request->RaidContext.nseg = 0; 2693 io_request->RaidContext.raid_context.nseg = 0;
2084 2694
2085 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); 2695 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
2086 /* 2696 /*
@@ -2097,12 +2707,14 @@ megasas_build_io_fusion(struct megasas_instance *instance,
2097 megasas_build_ld_nonrw_fusion(instance, scp, cmd); 2707 megasas_build_ld_nonrw_fusion(instance, scp, cmd);
2098 break; 2708 break;
2099 case READ_WRITE_SYSPDIO: 2709 case READ_WRITE_SYSPDIO:
2710 megasas_build_syspd_fusion(instance, scp, cmd, true);
2711 break;
2100 case NON_READ_WRITE_SYSPDIO: 2712 case NON_READ_WRITE_SYSPDIO:
2101 if (instance->secure_jbod_support && 2713 if (instance->secure_jbod_support ||
2102 (cmd_type == NON_READ_WRITE_SYSPDIO)) 2714 mr_device_priv_data->is_tm_capable)
2103 megasas_build_syspd_fusion(instance, scp, cmd, 0); 2715 megasas_build_syspd_fusion(instance, scp, cmd, false);
2104 else 2716 else
2105 megasas_build_syspd_fusion(instance, scp, cmd, 1); 2717 megasas_build_syspd_fusion(instance, scp, cmd, true);
2106 break; 2718 break;
2107 default: 2719 default:
2108 break; 2720 break;
@@ -2112,23 +2724,27 @@ megasas_build_io_fusion(struct megasas_instance *instance,
2112 * Construct SGL 2724 * Construct SGL
2113 */ 2725 */
2114 2726
2115 sge_count = 2727 sge_count = megasas_make_sgl(instance, scp, cmd);
2116 megasas_make_sgl_fusion(instance, scp,
2117 (struct MPI25_IEEE_SGE_CHAIN64 *)
2118 &io_request->SGL, cmd);
2119 2728
2120 if (sge_count > instance->max_num_sge) { 2729 if (sge_count > instance->max_num_sge || (sge_count < 0)) {
2121 dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds " 2730 dev_err(&instance->pdev->dev,
2122 "max (0x%x) allowed\n", sge_count, 2731 "%s %d sge_count (%d) is out of range. Range is: 0-%d\n",
2123 instance->max_num_sge); 2732 __func__, __LINE__, sge_count, instance->max_num_sge);
2124 return 1; 2733 return 1;
2125 } 2734 }
2126 2735
2127 /* numSGE store lower 8 bit of sge_count. 2736 if (instance->is_ventura) {
2128 * numSGEExt store higher 8 bit of sge_count 2737 set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
2129 */ 2738 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
2130 io_request->RaidContext.numSGE = sge_count; 2739 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
2131 io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8); 2740 } else {
2741 /* numSGE store lower 8 bit of sge_count.
2742 * numSGEExt store higher 8 bit of sge_count
2743 */
2744 io_request->RaidContext.raid_context.num_sge = sge_count;
2745 io_request->RaidContext.raid_context.num_sge_ext =
2746 (u8)(sge_count >> 8);
2747 }
2132 2748
2133 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); 2749 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
2134 2750
@@ -2149,25 +2765,61 @@ megasas_build_io_fusion(struct megasas_instance *instance,
2149 return 0; 2765 return 0;
2150} 2766}
2151 2767
2152union MEGASAS_REQUEST_DESCRIPTOR_UNION * 2768static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2153megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) 2769megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
2154{ 2770{
2155 u8 *p; 2771 u8 *p;
2156 struct fusion_context *fusion; 2772 struct fusion_context *fusion;
2157 2773
2158 if (index >= instance->max_fw_cmds) {
2159 dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for "
2160 "descriptor for scsi%d\n", index,
2161 instance->host->host_no);
2162 return NULL;
2163 }
2164 fusion = instance->ctrl_context; 2774 fusion = instance->ctrl_context;
2165 p = fusion->req_frames_desc 2775 p = fusion->req_frames_desc +
2166 +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index; 2776 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
2167 2777
2168 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p; 2778 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
2169} 2779}
2170 2780
2781
 2782 /* megasas_prepare_secondRaid1_IO
 2783 * It prepares the second IO of a RAID 1 write pair
2784 */
2785void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
2786 struct megasas_cmd_fusion *cmd,
2787 struct megasas_cmd_fusion *r1_cmd)
2788{
2789 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
2790 struct fusion_context *fusion;
2791 fusion = instance->ctrl_context;
2792 req_desc = cmd->request_desc;
 2793 /* copy the IO request frame as well as 8 SGEs' data for the r1 command */
2794 memcpy(r1_cmd->io_request, cmd->io_request,
2795 (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
2796 memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
2797 (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
 2798 /* sense buffer is different for the r1 command */
2799 r1_cmd->io_request->SenseBufferLowAddress =
2800 cpu_to_le32(r1_cmd->sense_phys_addr);
2801 r1_cmd->scmd = cmd->scmd;
2802 req_desc2 = megasas_get_request_descriptor(instance,
2803 (r1_cmd->index - 1));
2804 req_desc2->Words = 0;
2805 r1_cmd->request_desc = req_desc2;
2806 req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index);
2807 req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
2808 r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
2809 r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
2810 r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
2811 cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
2812 cpu_to_le16(r1_cmd->index);
2813 r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
2814 cpu_to_le16(cmd->index);
 2815 /* MSIxIndex of both commands' request descriptors should be the same */
2816 r1_cmd->request_desc->SCSIIO.MSIxIndex =
2817 cmd->request_desc->SCSIIO.MSIxIndex;
 2819 /* span_arm is different for the r1 cmd */
2819 r1_cmd->io_request->RaidContext.raid_context_g35.span_arm =
2820 cmd->io_request->RaidContext.raid_context_g35.span_arm + 1;
2821}
2822
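The helper clones the primary write into the peer command and cross-links the two through peer_smid, so whichever half completes second can find the other. A reduced model of just that cross-link (names are stand-ins):

```c
#include <assert.h>
#include <stdint.h>

struct model_cmd {
	uint16_t index;		/* SMID, 1-based */
	uint16_t peer_smid;	/* SMID of the R1 peer command */
};

/* Mirror of the peer_smid exchange done in megasas_prepare_secondRaid1_IO */
static void link_r1_pair(struct model_cmd *cmd, struct model_cmd *r1_cmd)
{
	cmd->peer_smid = r1_cmd->index;
	r1_cmd->peer_smid = cmd->index;
}

int main(void)
{
	struct model_cmd cmd = { .index = 5 };
	struct model_cmd r1_cmd = { .index = 261 };

	link_r1_pair(&cmd, &r1_cmd);
	assert(cmd.peer_smid == 261 && r1_cmd.peer_smid == 5);
	return 0;
}
```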
2171/** 2823/**
2172 * megasas_build_and_issue_cmd_fusion -Main routine for building and 2824 * megasas_build_and_issue_cmd_fusion -Main routine for building and
2173 * issuing non IOCTL cmd 2825 * issuing non IOCTL cmd
@@ -2178,7 +2830,7 @@ static u32
2178megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, 2830megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
2179 struct scsi_cmnd *scmd) 2831 struct scsi_cmnd *scmd)
2180{ 2832{
2181 struct megasas_cmd_fusion *cmd; 2833 struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
2182 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2834 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2183 u32 index; 2835 u32 index;
2184 struct fusion_context *fusion; 2836 struct fusion_context *fusion;
@@ -2193,13 +2845,22 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
2193 return SCSI_MLQUEUE_DEVICE_BUSY; 2845 return SCSI_MLQUEUE_DEVICE_BUSY;
2194 } 2846 }
2195 2847
2848 if (atomic_inc_return(&instance->fw_outstanding) >
2849 instance->host->can_queue) {
2850 atomic_dec(&instance->fw_outstanding);
2851 return SCSI_MLQUEUE_HOST_BUSY;
2852 }
2853
2196 cmd = megasas_get_cmd_fusion(instance, scmd->request->tag); 2854 cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
2197 2855
2856 if (!cmd) {
2857 atomic_dec(&instance->fw_outstanding);
2858 return SCSI_MLQUEUE_HOST_BUSY;
2859 }
2860
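The new fw_outstanding guard is the optimistic-increment pattern: bump the counter first, then back off if the ceiling was crossed, so concurrent submitters never settle above can_queue. A C11 userspace sketch of the same pattern:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int fw_outstanding;

/* Mirror of the can_queue check added above; false means "host busy". */
static bool try_claim_slot(int can_queue)
{
	if (atomic_fetch_add(&fw_outstanding, 1) + 1 > can_queue) {
		atomic_fetch_sub(&fw_outstanding, 1);
		return false;
	}
	return true;
}

int main(void)
{
	int accepted = 0;

	for (int i = 0; i < 10; i++)
		accepted += try_claim_slot(4);
	printf("accepted %d of 10 (limit 4)\n", accepted);
	return 0;
}
```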
2198 index = cmd->index; 2861 index = cmd->index;
2199 2862
2200 req_desc = megasas_get_request_descriptor(instance, index-1); 2863 req_desc = megasas_get_request_descriptor(instance, index-1);
2201 if (!req_desc)
2202 return SCSI_MLQUEUE_HOST_BUSY;
2203 2864
2204 req_desc->Words = 0; 2865 req_desc->Words = 0;
2205 cmd->request_desc = req_desc; 2866 cmd->request_desc = req_desc;
@@ -2208,6 +2869,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
2208 megasas_return_cmd_fusion(instance, cmd); 2869 megasas_return_cmd_fusion(instance, cmd);
2209 dev_err(&instance->pdev->dev, "Error building command\n"); 2870 dev_err(&instance->pdev->dev, "Error building command\n");
2210 cmd->request_desc = NULL; 2871 cmd->request_desc = NULL;
2872 atomic_dec(&instance->fw_outstanding);
2211 return SCSI_MLQUEUE_HOST_BUSY; 2873 return SCSI_MLQUEUE_HOST_BUSY;
2212 } 2874 }
2213 2875
@@ -2218,18 +2880,92 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
2218 cmd->io_request->ChainOffset != 0xF) 2880 cmd->io_request->ChainOffset != 0xF)
2219 dev_err(&instance->pdev->dev, "The chain offset value is not " 2881 dev_err(&instance->pdev->dev, "The chain offset value is not "
2220 "correct : %x\n", cmd->io_request->ChainOffset); 2882 "correct : %x\n", cmd->io_request->ChainOffset);
 2883	/*
 2884	 * If the command is RAID 1/10 fast-path write capable, try to get a
 2885	 * second command from the pool and construct it. FW has confirmed
 2886	 * that the LBA values of the two PDs backing a single R1/10 LD are
 2887	 * always the same.
 2888	 */
 2889	/* The driver-side count must always stay below max_fw_cmds
 2890	 * for a new command to be available from the pool
 2891	 * for the RAID 1 peer IO.
 2892	 */
2893 if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
2894 r1_cmd = megasas_get_cmd_fusion(instance,
2895 (scmd->request->tag + instance->max_fw_cmds));
2896 megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
2897 }
2898
2221 2899
2222 /* 2900 /*
2223 * Issue the command to the FW 2901 * Issue the command to the FW
2224 */ 2902 */
2225 atomic_inc(&instance->fw_outstanding);
2226 2903
2227 megasas_fire_cmd_fusion(instance, req_desc); 2904 megasas_fire_cmd_fusion(instance, req_desc);
2228 2905
2906 if (r1_cmd)
2907 megasas_fire_cmd_fusion(instance, r1_cmd->request_desc);
2908
2909
2229 return 0; 2910 return 0;
2230} 2911}
2231 2912
2232/** 2913/**
 2914 * megasas_complete_r1_command -
 2915 * completes R1 FP write commands that have a valid peer SMID
 2916 * @instance: Adapter soft state
 2917 * @cmd: MPT command frame
2918 *
2919 */
2920static inline void
2921megasas_complete_r1_command(struct megasas_instance *instance,
2922 struct megasas_cmd_fusion *cmd)
2923{
2924 u8 *sense, status, ex_status;
2925 u32 data_length;
2926 u16 peer_smid;
2927 struct fusion_context *fusion;
2928 struct megasas_cmd_fusion *r1_cmd = NULL;
2929 struct scsi_cmnd *scmd_local = NULL;
2930 struct RAID_CONTEXT_G35 *rctx_g35;
2931
2932 rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
2933 fusion = instance->ctrl_context;
2934 peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid);
2935
2936 r1_cmd = fusion->cmd_list[peer_smid - 1];
2937 scmd_local = cmd->scmd;
2938 status = rctx_g35->status;
2939 ex_status = rctx_g35->ex_status;
2940 data_length = cmd->io_request->DataLength;
2941 sense = cmd->sense;
2942
2943 cmd->cmd_completed = true;
2944
2945 /* Check if peer command is completed or not*/
2946 if (r1_cmd->cmd_completed) {
2947 rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35;
2948 if (rctx_g35->status != MFI_STAT_OK) {
2949 status = rctx_g35->status;
2950 ex_status = rctx_g35->ex_status;
2951 data_length = r1_cmd->io_request->DataLength;
2952 sense = r1_cmd->sense;
2953 }
2954
2955 megasas_return_cmd_fusion(instance, r1_cmd);
2956 map_cmd_status(fusion, scmd_local, status, ex_status,
2957 le32_to_cpu(data_length), sense);
2958 if (instance->ldio_threshold &&
2959 megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
2960 atomic_dec(&instance->ldio_outstanding);
2961 scmd_local->SCp.ptr = NULL;
2962 megasas_return_cmd_fusion(instance, cmd);
2963 scsi_dma_unmap(scmd_local);
2964 scmd_local->scsi_done(scmd_local);
2965 }
2966}
2967
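megasas_complete_r1_command implements a last-completer-finishes handoff: each half of the R1 pair marks itself done, and only the half that sees its peer already completed maps the status (preferring the peer's status on a peer error) and finishes the SCSI command. A reduced, single-threaded model of that handoff:

```c
#include <stdbool.h>
#include <stdio.h>

struct r1_half {
	bool cmd_completed;
	int status;		/* 0 stands in for MFI_STAT_OK */
};

/* Returns true when this completion is the one that must finish the scmd. */
static bool complete_r1_half(struct r1_half *self, struct r1_half *peer,
			     int *final_status)
{
	self->cmd_completed = true;
	if (!peer->cmd_completed)
		return false;	/* peer still in flight; it finishes up */

	*final_status = self->status;
	if (peer->status != 0)	/* a peer error overrides our status */
		*final_status = peer->status;
	return true;
}

int main(void)
{
	struct r1_half a = { .status = 0 }, b = { .status = 3 };
	int final_status = 0;

	(void)complete_r1_half(&a, &b, &final_status);	/* first half: defers */
	if (complete_r1_half(&b, &a, &final_status))	/* second half finishes */
		printf("final status %d\n", final_status);	/* prints 3 */
	return 0;
}
```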
2968/**
2233 * complete_cmd_fusion - Completes command 2969 * complete_cmd_fusion - Completes command
2234 * @instance: Adapter soft state 2970 * @instance: Adapter soft state
 2235 * Completes all commands that are in the reply descriptor queue 2971
@@ -2244,8 +2980,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2244 struct megasas_cmd *cmd_mfi; 2980 struct megasas_cmd *cmd_mfi;
2245 struct megasas_cmd_fusion *cmd_fusion; 2981 struct megasas_cmd_fusion *cmd_fusion;
2246 u16 smid, num_completed; 2982 u16 smid, num_completed;
2247 u8 reply_descript_type; 2983 u8 reply_descript_type, *sense, status, extStatus;
2248 u32 status, extStatus, device_id; 2984 u32 device_id, data_length;
2249 union desc_value d_val; 2985 union desc_value d_val;
2250 struct LD_LOAD_BALANCE_INFO *lbinfo; 2986 struct LD_LOAD_BALANCE_INFO *lbinfo;
2251 int threshold_reply_count = 0; 2987 int threshold_reply_count = 0;
@@ -2275,20 +3011,17 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2275 3011
2276 while (d_val.u.low != cpu_to_le32(UINT_MAX) && 3012 while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
2277 d_val.u.high != cpu_to_le32(UINT_MAX)) { 3013 d_val.u.high != cpu_to_le32(UINT_MAX)) {
2278 smid = le16_to_cpu(reply_desc->SMID);
2279 3014
3015 smid = le16_to_cpu(reply_desc->SMID);
2280 cmd_fusion = fusion->cmd_list[smid - 1]; 3016 cmd_fusion = fusion->cmd_list[smid - 1];
2281 3017 scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
2282 scsi_io_req = 3018 cmd_fusion->io_request;
2283 (struct MPI2_RAID_SCSI_IO_REQUEST *)
2284 cmd_fusion->io_request;
2285
2286 if (cmd_fusion->scmd)
2287 cmd_fusion->scmd->SCp.ptr = NULL;
2288 3019
2289 scmd_local = cmd_fusion->scmd; 3020 scmd_local = cmd_fusion->scmd;
2290 status = scsi_io_req->RaidContext.status; 3021 status = scsi_io_req->RaidContext.raid_context.status;
2291 extStatus = scsi_io_req->RaidContext.exStatus; 3022 extStatus = scsi_io_req->RaidContext.raid_context.ex_status;
3023 sense = cmd_fusion->sense;
3024 data_length = scsi_io_req->DataLength;
2292 3025
2293 switch (scsi_io_req->Function) { 3026 switch (scsi_io_req->Function) {
2294 case MPI2_FUNCTION_SCSI_TASK_MGMT: 3027 case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -2303,37 +3036,33 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2303 break; 3036 break;
2304 case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ 3037 case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
2305 /* Update load balancing info */ 3038 /* Update load balancing info */
2306 device_id = MEGASAS_DEV_INDEX(scmd_local); 3039 if (fusion->load_balance_info &&
2307 lbinfo = &fusion->load_balance_info[device_id]; 3040 (cmd_fusion->scmd->SCp.Status &
2308 if (cmd_fusion->scmd->SCp.Status & 3041 MEGASAS_LOAD_BALANCE_FLAG)) {
2309 MEGASAS_LOAD_BALANCE_FLAG) { 3042 device_id = MEGASAS_DEV_INDEX(scmd_local);
3043 lbinfo = &fusion->load_balance_info[device_id];
2310 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); 3044 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
2311 cmd_fusion->scmd->SCp.Status &= 3045 cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
2312 ~MEGASAS_LOAD_BALANCE_FLAG;
2313 } 3046 }
2314 if (reply_descript_type == 3047 //Fall thru and complete IO
2315 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
2316 if (megasas_dbg_lvl == 5)
2317 dev_err(&instance->pdev->dev, "\nFAST Path "
2318 "IO Success\n");
2319 }
2320 /* Fall thru and complete IO */
2321 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ 3048 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
2322 /* Map the FW Cmd Status */
2323 map_cmd_status(cmd_fusion, status, extStatus);
2324 scsi_io_req->RaidContext.status = 0;
2325 scsi_io_req->RaidContext.exStatus = 0;
2326 if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
2327 atomic_dec(&instance->ldio_outstanding);
2328 megasas_return_cmd_fusion(instance, cmd_fusion);
2329 scsi_dma_unmap(scmd_local);
2330 scmd_local->scsi_done(scmd_local);
2331 atomic_dec(&instance->fw_outstanding); 3049 atomic_dec(&instance->fw_outstanding);
2332 3050 if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
3051 map_cmd_status(fusion, scmd_local, status,
3052 extStatus, le32_to_cpu(data_length),
3053 sense);
3054 if (instance->ldio_threshold &&
3055 (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
3056 atomic_dec(&instance->ldio_outstanding);
3057 scmd_local->SCp.ptr = NULL;
3058 megasas_return_cmd_fusion(instance, cmd_fusion);
3059 scsi_dma_unmap(scmd_local);
3060 scmd_local->scsi_done(scmd_local);
3061 } else /* Optimal VD - R1 FP command completion. */
3062 megasas_complete_r1_command(instance, cmd_fusion);
2333 break; 3063 break;
2334 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ 3064 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
2335 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 3065 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2336
2337 /* Poll mode. Dummy free. 3066 /* Poll mode. Dummy free.
2338 * In case of Interrupt mode, caller has reverse check. 3067 * In case of Interrupt mode, caller has reverse check.
2339 */ 3068 */
@@ -2376,7 +3105,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2376 * pending to be completed 3105 * pending to be completed
2377 */ 3106 */
2378 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { 3107 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
2379 if (fusion->adapter_type == INVADER_SERIES) 3108 if (instance->msix_combined)
2380 writel(((MSIxIndex & 0x7) << 24) | 3109 writel(((MSIxIndex & 0x7) << 24) |
2381 fusion->last_reply_idx[MSIxIndex], 3110 fusion->last_reply_idx[MSIxIndex],
2382 instance->reply_post_host_index_addr[MSIxIndex/8]); 3111 instance->reply_post_host_index_addr[MSIxIndex/8]);
@@ -2392,7 +3121,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2392 return IRQ_NONE; 3121 return IRQ_NONE;
2393 3122
2394 wmb(); 3123 wmb();
2395 if (fusion->adapter_type == INVADER_SERIES) 3124 if (instance->msix_combined)
2396 writel(((MSIxIndex & 0x7) << 24) | 3125 writel(((MSIxIndex & 0x7) << 24) |
2397 fusion->last_reply_idx[MSIxIndex], 3126 fusion->last_reply_idx[MSIxIndex],
2398 instance->reply_post_host_index_addr[MSIxIndex/8]); 3127 instance->reply_post_host_index_addr[MSIxIndex/8]);
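Both writel sites above use the combined reply queue encoding: the low three bits of the MSI-X index go into bits 24-26 of the written value, and each group of eight vectors shares one host index register (MSIxIndex/8). A sketch of that encoding:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirror of: ((MSIxIndex & 0x7) << 24) | fusion->last_reply_idx[MSIxIndex] */
static uint32_t reply_post_index_val(uint32_t msix_index, uint32_t reply_idx)
{
	return ((msix_index & 0x7) << 24) | reply_idx;
}

int main(void)
{
	uint32_t msix_index = 13, reply_idx = 0x61;

	printf("register %u <- 0x%08x\n", msix_index / 8,
	       reply_post_index_val(msix_index, reply_idx));
	/* vector 13 -> register 1, value 0x05000061 */
	return 0;
}
```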
@@ -2405,6 +3134,22 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2405} 3134}
2406 3135
2407/** 3136/**
3137 * megasas_sync_irqs - Synchronizes all IRQs owned by adapter
3138 * @instance: Adapter soft state
3139 */
3140void megasas_sync_irqs(unsigned long instance_addr)
3141{
3142 u32 count, i;
3143 struct megasas_instance *instance =
3144 (struct megasas_instance *)instance_addr;
3145
3146 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3147
3148 for (i = 0; i < count; i++)
3149 synchronize_irq(pci_irq_vector(instance->pdev, i));
3150}
3151
3152/**
2408 * megasas_complete_cmd_dpc_fusion - Completes command 3153 * megasas_complete_cmd_dpc_fusion - Completes command
2409 * @instance: Adapter soft state 3154 * @instance: Adapter soft state
2410 * 3155 *
@@ -2489,7 +3234,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
2489 * mfi_cmd: megasas_cmd pointer 3234 * mfi_cmd: megasas_cmd pointer
2490 * 3235 *
2491 */ 3236 */
2492u8 3237void
2493build_mpt_mfi_pass_thru(struct megasas_instance *instance, 3238build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2494 struct megasas_cmd *mfi_cmd) 3239 struct megasas_cmd *mfi_cmd)
2495{ 3240{
@@ -2518,7 +3263,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2518 3263
2519 io_req = cmd->io_request; 3264 io_req = cmd->io_request;
2520 3265
2521 if (fusion->adapter_type == INVADER_SERIES) { 3266 if (fusion->adapter_type >= INVADER_SERIES) {
2522 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = 3267 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
2523 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; 3268 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
2524 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 3269 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
@@ -2539,8 +3284,6 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2539 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 3284 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2540 3285
2541 mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz); 3286 mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
2542
2543 return 0;
2544} 3287}
2545 3288
2546/** 3289/**
@@ -2552,21 +3295,14 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2552union MEGASAS_REQUEST_DESCRIPTOR_UNION * 3295union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2553build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 3296build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
2554{ 3297{
2555 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3298 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
2556 u16 index; 3299 u16 index;
2557 3300
2558 if (build_mpt_mfi_pass_thru(instance, cmd)) { 3301 build_mpt_mfi_pass_thru(instance, cmd);
2559 dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
2560 return NULL;
2561 }
2562
2563 index = cmd->context.smid; 3302 index = cmd->context.smid;
2564 3303
2565 req_desc = megasas_get_request_descriptor(instance, index - 1); 3304 req_desc = megasas_get_request_descriptor(instance, index - 1);
2566 3305
2567 if (!req_desc)
2568 return NULL;
2569
2570 req_desc->Words = 0; 3306 req_desc->Words = 0;
2571 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 3307 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2572 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3308 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -2582,21 +3318,16 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
2582 * @cmd: mfi cmd pointer 3318 * @cmd: mfi cmd pointer
2583 * 3319 *
2584 */ 3320 */
2585int 3321void
2586megasas_issue_dcmd_fusion(struct megasas_instance *instance, 3322megasas_issue_dcmd_fusion(struct megasas_instance *instance,
2587 struct megasas_cmd *cmd) 3323 struct megasas_cmd *cmd)
2588{ 3324{
2589 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3325 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2590 3326
2591 req_desc = build_mpt_cmd(instance, cmd); 3327 req_desc = build_mpt_cmd(instance, cmd);
2592 if (!req_desc) {
2593 dev_info(&instance->pdev->dev, "Failed from %s %d\n",
2594 __func__, __LINE__);
2595 return DCMD_NOT_FIRED;
2596 }
2597 3328
2598 megasas_fire_cmd_fusion(instance, req_desc); 3329 megasas_fire_cmd_fusion(instance, req_desc);
2599 return DCMD_SUCCESS; 3330 return;
2600} 3331}
2601 3332
2602/** 3333/**
@@ -2771,6 +3502,14 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2771 " will reset adapter scsi%d.\n", 3502 " will reset adapter scsi%d.\n",
2772 instance->host->host_no); 3503 instance->host->host_no);
2773 megasas_complete_cmd_dpc_fusion((unsigned long)instance); 3504 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
3505 if (instance->requestorId && reason) {
 3506 dev_warn(&instance->pdev->dev, "SR-IOV: found FW in FAULT"
 3507 " state while polling during"
 3508 " I/O timeout handling for scsi%d\n",
3509 instance->host->host_no);
3510 *convert = 1;
3511 }
3512
2774 retval = 1; 3513 retval = 1;
2775 goto out; 3514 goto out;
2776 } 3515 }
@@ -2790,7 +3529,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2790 } 3529 }
2791 3530
2792 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */ 3531 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
2793 if (instance->requestorId && reason) { 3532 if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) {
2794 if (instance->hb_host_mem->HB.fwCounter != 3533 if (instance->hb_host_mem->HB.fwCounter !=
2795 instance->hb_host_mem->HB.driverCounter) { 3534 instance->hb_host_mem->HB.driverCounter) {
2796 instance->hb_host_mem->HB.driverCounter = 3535 instance->hb_host_mem->HB.driverCounter =
@@ -3030,12 +3769,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
3030 3769
3031 req_desc = megasas_get_request_descriptor(instance, 3770 req_desc = megasas_get_request_descriptor(instance,
3032 (cmd_fusion->index - 1)); 3771 (cmd_fusion->index - 1));
3033 if (!req_desc) {
3034 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
3035 __func__, __LINE__);
3036 megasas_return_cmd(instance, cmd_mfi);
3037 return -ENOMEM;
3038 }
3039 3772
3040 cmd_fusion->request_desc = req_desc; 3773 cmd_fusion->request_desc = req_desc;
3041 req_desc->Words = 0; 3774 req_desc->Words = 0;
@@ -3092,7 +3825,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
3092 break; 3825 break;
3093 else { 3826 else {
3094 instance->instancet->disable_intr(instance); 3827 instance->instancet->disable_intr(instance);
3095 msleep(1000); 3828 megasas_sync_irqs((unsigned long)instance);
3096 megasas_complete_cmd_dpc_fusion 3829 megasas_complete_cmd_dpc_fusion
3097 ((unsigned long)instance); 3830 ((unsigned long)instance);
3098 instance->instancet->enable_intr(instance); 3831 instance->instancet->enable_intr(instance);
@@ -3173,13 +3906,13 @@ static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
3173 instance = (struct megasas_instance *)sdev->host->hostdata; 3906 instance = (struct megasas_instance *)sdev->host->hostdata;
3174 fusion = instance->ctrl_context; 3907 fusion = instance->ctrl_context;
3175 3908
3176 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { 3909 if (!MEGASAS_IS_LOGICAL(sdev)) {
3177 if (instance->use_seqnum_jbod_fp) { 3910 if (instance->use_seqnum_jbod_fp) {
3178 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 3911 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
3179 sdev->id; 3912 + sdev->id;
3180 pd_sync = (void *)fusion->pd_seq_sync 3913 pd_sync = (void *)fusion->pd_seq_sync
3181 [(instance->pd_seq_map_id - 1) & 1]; 3914 [(instance->pd_seq_map_id - 1) & 1];
3182 devhandle = pd_sync->seq[pd_index].devHandle; 3915 devhandle = pd_sync->seq[pd_index].devHandle;
3183 } else 3916 } else
 3184 sdev_printk(KERN_ERR, sdev, "Firmware exposes tmCapable" 3917
3185 " without JBOD MAP support from %s %d\n", __func__, __LINE__); 3918 " without JBOD MAP support from %s %d\n", __func__, __LINE__);
@@ -3212,6 +3945,9 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
3212 instance = (struct megasas_instance *)scmd->device->host->hostdata; 3945 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3213 fusion = instance->ctrl_context; 3946 fusion = instance->ctrl_context;
3214 3947
3948 scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd);
3949 scsi_print_command(scmd);
3950
3215 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 3951 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
3216 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," 3952 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
3217 "SCSI host:%d\n", instance->host->host_no); 3953 "SCSI host:%d\n", instance->host->host_no);
@@ -3292,6 +4028,9 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
3292 instance = (struct megasas_instance *)scmd->device->host->hostdata; 4028 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3293 fusion = instance->ctrl_context; 4029 fusion = instance->ctrl_context;
3294 4030
4031 sdev_printk(KERN_INFO, scmd->device,
4032 "target reset called for scmd(%p)\n", scmd);
4033
3295 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { 4034 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
3296 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," 4035 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
3297 "SCSI host:%d\n", instance->host->host_no); 4036 "SCSI host:%d\n", instance->host->host_no);
@@ -3362,7 +4101,7 @@ int megasas_check_mpio_paths(struct megasas_instance *instance,
3362 struct scsi_cmnd *scmd) 4101 struct scsi_cmnd *scmd)
3363{ 4102{
3364 struct megasas_instance *peer_instance = NULL; 4103 struct megasas_instance *peer_instance = NULL;
3365 int retval = (DID_RESET << 16); 4104 int retval = (DID_REQUEUE << 16);
3366 4105
3367 if (instance->peerIsPresent) { 4106 if (instance->peerIsPresent) {
3368 peer_instance = megasas_get_peer_instance(instance); 4107 peer_instance = megasas_get_peer_instance(instance);
@@ -3377,9 +4116,9 @@ int megasas_check_mpio_paths(struct megasas_instance *instance,
3377/* Core fusion reset function */ 4116/* Core fusion reset function */
3378int megasas_reset_fusion(struct Scsi_Host *shost, int reason) 4117int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
3379{ 4118{
3380 int retval = SUCCESS, i, convert = 0; 4119 int retval = SUCCESS, i, j, convert = 0;
3381 struct megasas_instance *instance; 4120 struct megasas_instance *instance;
3382 struct megasas_cmd_fusion *cmd_fusion; 4121 struct megasas_cmd_fusion *cmd_fusion, *r1_cmd;
3383 struct fusion_context *fusion; 4122 struct fusion_context *fusion;
3384 u32 abs_state, status_reg, reset_adapter; 4123 u32 abs_state, status_reg, reset_adapter;
3385 u32 io_timeout_in_crash_mode = 0; 4124 u32 io_timeout_in_crash_mode = 0;
@@ -3440,7 +4179,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
3440 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 4179 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
3441 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); 4180 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
3442 instance->instancet->disable_intr(instance); 4181 instance->instancet->disable_intr(instance);
3443 msleep(1000); 4182 megasas_sync_irqs((unsigned long)instance);
3444 4183
3445 /* First try waiting for commands to complete */ 4184 /* First try waiting for commands to complete */
3446 if (megasas_wait_for_outstanding_fusion(instance, reason, 4185 if (megasas_wait_for_outstanding_fusion(instance, reason,
@@ -3451,23 +4190,40 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
3451 if (convert) 4190 if (convert)
3452 reason = 0; 4191 reason = 0;
3453 4192
4193 if (megasas_dbg_lvl & OCR_LOGS)
4194 dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
4195
3454 /* Now return commands back to the OS */ 4196 /* Now return commands back to the OS */
3455 for (i = 0 ; i < instance->max_scsi_cmds; i++) { 4197 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
3456 cmd_fusion = fusion->cmd_list[i]; 4198 cmd_fusion = fusion->cmd_list[i];
 4199 /* check for extra commands issued by the driver */
4200 if (instance->is_ventura) {
4201 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
4202 megasas_return_cmd_fusion(instance, r1_cmd);
4203 }
3457 scmd_local = cmd_fusion->scmd; 4204 scmd_local = cmd_fusion->scmd;
3458 if (cmd_fusion->scmd) { 4205 if (cmd_fusion->scmd) {
4206 if (megasas_dbg_lvl & OCR_LOGS) {
4207 sdev_printk(KERN_INFO,
4208 cmd_fusion->scmd->device, "SMID: 0x%x\n",
4209 cmd_fusion->index);
4210 scsi_print_command(cmd_fusion->scmd);
4211 }
4212
3459 scmd_local->result = 4213 scmd_local->result =
3460 megasas_check_mpio_paths(instance, 4214 megasas_check_mpio_paths(instance,
3461 scmd_local); 4215 scmd_local);
3462 if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) 4216 if (instance->ldio_threshold &&
4217 megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
3463 atomic_dec(&instance->ldio_outstanding); 4218 atomic_dec(&instance->ldio_outstanding);
3464 megasas_return_cmd_fusion(instance, cmd_fusion); 4219 megasas_return_cmd_fusion(instance, cmd_fusion);
3465 scsi_dma_unmap(scmd_local); 4220 scsi_dma_unmap(scmd_local);
3466 scmd_local->scsi_done(scmd_local); 4221 scmd_local->scsi_done(scmd_local);
3467 atomic_dec(&instance->fw_outstanding);
3468 } 4222 }
3469 } 4223 }
3470 4224
4225 atomic_set(&instance->fw_outstanding, 0);
4226
3471 status_reg = instance->instancet->read_fw_status_reg( 4227 status_reg = instance->instancet->read_fw_status_reg(
3472 instance->reg_set); 4228 instance->reg_set);
3473 abs_state = status_reg & MFI_STATE_MASK; 4229 abs_state = status_reg & MFI_STATE_MASK;
@@ -3528,11 +4284,13 @@ transition_to_ready:
3528 __func__, __LINE__); 4284 __func__, __LINE__);
3529 megaraid_sas_kill_hba(instance); 4285 megaraid_sas_kill_hba(instance);
3530 retval = FAILED; 4286 retval = FAILED;
4287 goto out;
3531 } 4288 }
3532 /* Reset load balance info */ 4289 /* Reset load balance info */
3533 memset(fusion->load_balance_info, 0, 4290 if (fusion->load_balance_info)
3534 sizeof(struct LD_LOAD_BALANCE_INFO) 4291 memset(fusion->load_balance_info, 0,
3535 *MAX_LOGICAL_DRIVES_EXT); 4292 (sizeof(struct LD_LOAD_BALANCE_INFO) *
4293 MAX_LOGICAL_DRIVES_EXT));
3536 4294
3537 if (!megasas_get_map_info(instance)) 4295 if (!megasas_get_map_info(instance))
3538 megasas_sync_map_info(instance); 4296 megasas_sync_map_info(instance);
@@ -3540,7 +4298,17 @@ transition_to_ready:
3540 megasas_setup_jbod_map(instance); 4298 megasas_setup_jbod_map(instance);
3541 4299
3542 shost_for_each_device(sdev, shost) 4300 shost_for_each_device(sdev, shost)
3543 megasas_update_sdev_properties(sdev); 4301 megasas_set_dynamic_target_properties(sdev);
4302
4303 /* reset stream detection array */
4304 if (instance->is_ventura) {
4305 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
4306 memset(fusion->stream_detect_by_ld[j],
4307 0, sizeof(struct LD_STREAM_DETECT));
4308 fusion->stream_detect_by_ld[j]->mru_bit_map
4309 = MR_STREAM_BITMAP;
4310 }
4311 }
3544 4312
3545 clear_bit(MEGASAS_FUSION_IN_RESET, 4313 clear_bit(MEGASAS_FUSION_IN_RESET,
3546 &instance->reset_flags); 4314 &instance->reset_flags);
@@ -3676,6 +4444,64 @@ void megasas_fusion_ocr_wq(struct work_struct *work)
3676 megasas_reset_fusion(instance->host, 0); 4444 megasas_reset_fusion(instance->host, 0);
3677} 4445}
3678 4446
4447/* Allocate fusion context */
4448int
4449megasas_alloc_fusion_context(struct megasas_instance *instance)
4450{
4451 struct fusion_context *fusion;
4452
4453 instance->ctrl_context_pages = get_order(sizeof(struct fusion_context));
4454 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
4455 instance->ctrl_context_pages);
4456 if (!instance->ctrl_context) {
4457 /* fall back to using vmalloc for fusion_context */
4458 instance->ctrl_context = vzalloc(sizeof(struct fusion_context));
4459 if (!instance->ctrl_context) {
4460 dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__);
4461 return -ENOMEM;
4462 }
4463 }
4464
4465 fusion = instance->ctrl_context;
4466
4467 fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
4468 sizeof(struct LD_LOAD_BALANCE_INFO));
4469 fusion->load_balance_info =
4470 (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
4471 fusion->load_balance_info_pages);
4472 if (!fusion->load_balance_info) {
4473 fusion->load_balance_info = vzalloc(MAX_LOGICAL_DRIVES_EXT *
4474 sizeof(struct LD_LOAD_BALANCE_INFO));
4475 if (!fusion->load_balance_info)
4476 dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, "
4477 "continuing without Load Balance support\n");
4478 }
4479
4480 return 0;
4481}
4482
4483void
4484megasas_free_fusion_context(struct megasas_instance *instance)
4485{
4486 struct fusion_context *fusion = instance->ctrl_context;
4487
4488 if (fusion) {
4489 if (fusion->load_balance_info) {
4490 if (is_vmalloc_addr(fusion->load_balance_info))
4491 vfree(fusion->load_balance_info);
4492 else
4493 free_pages((ulong)fusion->load_balance_info,
4494 fusion->load_balance_info_pages);
4495 }
4496
4497 if (is_vmalloc_addr(fusion))
4498 vfree(fusion);
4499 else
4500 free_pages((ulong)fusion,
4501 instance->ctrl_context_pages);
4502 }
4503}
4504
3679struct megasas_instance_template megasas_instance_template_fusion = { 4505struct megasas_instance_template megasas_instance_template_fusion = {
3680 .enable_intr = megasas_enable_intr_fusion, 4506 .enable_intr = megasas_enable_intr_fusion,
3681 .disable_intr = megasas_disable_intr_fusion, 4507 .disable_intr = megasas_disable_intr_fusion,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index e3bee04c1eb1..d78d76112501 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -59,6 +59,8 @@
59#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10 59#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
60#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80 60#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
61#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8 61#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
62#define MR_RL_WRITE_THROUGH_MODE 0x00
63#define MR_RL_WRITE_BACK_MODE 0x01
62 64
63/* T10 PI defines */ 65/* T10 PI defines */
64#define MR_PROT_INFO_TYPE_CONTROLLER 0x8 66#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
@@ -81,6 +83,11 @@
81enum MR_RAID_FLAGS_IO_SUB_TYPE { 83enum MR_RAID_FLAGS_IO_SUB_TYPE {
82 MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0, 84 MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
83 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1, 85 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
86 MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2,
87 MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3,
88 MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4,
89 MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
90 MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7
84}; 91};
85 92
86/* 93/*
@@ -94,11 +101,13 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
94#define MEGASAS_FP_CMD_LEN 16 101#define MEGASAS_FP_CMD_LEN 16
95#define MEGASAS_FUSION_IN_RESET 0 102#define MEGASAS_FUSION_IN_RESET 0
96#define THRESHOLD_REPLY_COUNT 50 103#define THRESHOLD_REPLY_COUNT 50
104#define RAID_1_PEER_CMDS 2
97#define JBOD_MAPS_COUNT 2 105#define JBOD_MAPS_COUNT 2
98 106
99enum MR_FUSION_ADAPTER_TYPE { 107enum MR_FUSION_ADAPTER_TYPE {
100 THUNDERBOLT_SERIES = 0, 108 THUNDERBOLT_SERIES = 0,
101 INVADER_SERIES = 1, 109 INVADER_SERIES = 1,
110 VENTURA_SERIES = 2,
102}; 111};
103 112
104/* 113/*
@@ -108,29 +117,133 @@ enum MR_FUSION_ADAPTER_TYPE {
108 117
109struct RAID_CONTEXT { 118struct RAID_CONTEXT {
110#if defined(__BIG_ENDIAN_BITFIELD) 119#if defined(__BIG_ENDIAN_BITFIELD)
111 u8 nseg:4; 120 u8 nseg:4;
112 u8 Type:4; 121 u8 type:4;
113#else 122#else
114 u8 Type:4; 123 u8 type:4;
115 u8 nseg:4; 124 u8 nseg:4;
116#endif 125#endif
117 u8 resvd0; 126 u8 resvd0;
118 __le16 timeoutValue; 127 __le16 timeout_value;
119 u8 regLockFlags; 128 u8 reg_lock_flags;
120 u8 resvd1; 129 u8 resvd1;
121 __le16 VirtualDiskTgtId; 130 __le16 virtual_disk_tgt_id;
122 __le64 regLockRowLBA; 131 __le64 reg_lock_row_lba;
123 __le32 regLockLength; 132 __le32 reg_lock_length;
124 __le16 nextLMId; 133 __le16 next_lmid;
125 u8 exStatus; 134 u8 ex_status;
126 u8 status; 135 u8 status;
127 u8 RAIDFlags; 136 u8 raid_flags;
128 u8 numSGE; 137 u8 num_sge;
129 __le16 configSeqNum; 138 __le16 config_seq_num;
130 u8 spanArm; 139 u8 span_arm;
131 u8 priority; 140 u8 priority;
132 u8 numSGEExt; 141 u8 num_sge_ext;
133 u8 resvd2; 142 u8 resvd2;
143};
144
145/*
 146 * RAID context structure describing Ventura MegaRAID specific
 147 * IO parameters. It resides at offset 0x60, where the SGL normally
 148 * starts in MPT IO frames.
149 */
150struct RAID_CONTEXT_G35 {
151 #define RAID_CONTEXT_NSEG_MASK 0x00F0
152 #define RAID_CONTEXT_NSEG_SHIFT 4
153 #define RAID_CONTEXT_TYPE_MASK 0x000F
154 #define RAID_CONTEXT_TYPE_SHIFT 0
155 u16 nseg_type;
 156 u16 timeout_value; /* 0x02 - 0x03 */
 157 u16 routing_flags; /* 0x04 - 0x05 routing flags */
 158 u16 virtual_disk_tgt_id; /* 0x06 - 0x07 */
159 u64 reg_lock_row_lba; /* 0x08 - 0x0F */
160 u32 reg_lock_length; /* 0x10 - 0x13 */
161 union {
162 u16 next_lmid; /* 0x14 - 0x15 */
163 u16 peer_smid; /* used for the raid 1/10 fp writes */
164 } smid;
165 u8 ex_status; /* 0x16 : OUT */
166 u8 status; /* 0x17 status */
167 u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4],
168 * resvd[3:1], preferredCpu[0]
169 */
 170 u8 span_arm; /* 0x19 span[7:5], arm[4:0] */
171 u16 config_seq_num; /* 0x1A -0x1B */
172 union {
173 /*
174 * Bit format:
175 * ---------------------------------
176 * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
177 * ---------------------------------
178 * Byte0 | numSGE[7]- numSGE[0] |
179 * ---------------------------------
180 * Byte1 |SD | resvd | numSGE 8-11 |
181 * --------------------------------
182 */
183 #define NUM_SGE_MASK_LOWER 0xFF
184 #define NUM_SGE_MASK_UPPER 0x0F
185 #define NUM_SGE_SHIFT_UPPER 8
186 #define STREAM_DETECT_SHIFT 7
187 #define STREAM_DETECT_MASK 0x80
188 struct {
189#if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */
190 u16 stream_detected:1;
191 u16 reserved:3;
192 u16 num_sge:12;
193#else
194 u16 num_sge:12;
195 u16 reserved:3;
196 u16 stream_detected:1;
197#endif
198 } bits;
199 u8 bytes[2];
200 } u;
201 u8 resvd2[2]; /* 0x1E-0x1F */
202};
203
204#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT 1
205#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT 2
206#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT 3
207#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT 4
208#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT 5
209#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT 6
210#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT 7
211#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT 8
212#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK 0x0F00
213#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT 12
214#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK 0xF000
215
216static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35,
217 u16 sge_count)
218{
219 rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER);
220 rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER)
221 & NUM_SGE_MASK_UPPER);
222}
223
224static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35)
225{
226 u16 sge_count;
227
228 sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER)
229 << NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0]));
230 return sge_count;
231}
232
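set_num_sge() and get_num_sge() split a 12-bit SGE count across bytes[0] and the low nibble of bytes[1], leaving bit 7 of bytes[1] for the stream-detect flag. A compilable round-trip of the same logic, with a local stand-in for the union's byte view:

```c
#include <assert.h>
#include <stdint.h>

#define NUM_SGE_MASK_LOWER	0xFF
#define NUM_SGE_MASK_UPPER	0x0F
#define NUM_SGE_SHIFT_UPPER	8
#define STREAM_DETECT_MASK	0x80

struct g35_u {
	uint8_t bytes[2];	/* stand-in for the union's byte view */
};

static void set_num_sge(struct g35_u *u, uint16_t sge_count)
{
	u->bytes[0] = (uint8_t)(sge_count & NUM_SGE_MASK_LOWER);
	u->bytes[1] |= (uint8_t)((sge_count >> NUM_SGE_SHIFT_UPPER)
				 & NUM_SGE_MASK_UPPER);
}

static uint16_t get_num_sge(const struct g35_u *u)
{
	return (uint16_t)(((u->bytes[1] & NUM_SGE_MASK_UPPER)
			   << NUM_SGE_SHIFT_UPPER) | u->bytes[0]);
}

int main(void)
{
	struct g35_u u = { .bytes = { 0, STREAM_DETECT_MASK } };

	set_num_sge(&u, 0x9AB);				/* any 12-bit count */
	assert(get_num_sge(&u) == 0x9AB);
	assert(u.bytes[1] & STREAM_DETECT_MASK);	/* flag preserved */
	return 0;
}
```

As in the driver, set_num_sge() ORs the upper nibble in rather than assigning it, so it assumes the nibble starts out clear; that holds for a freshly zeroed request frame.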
233#define SET_STREAM_DETECTED(rctx_g35) \
234 (rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK)
235
236#define CLEAR_STREAM_DETECTED(rctx_g35) \
237 (rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK))
238
239static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35)
240{
241 return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK));
242}
243
244union RAID_CONTEXT_UNION {
245 struct RAID_CONTEXT raid_context;
246 struct RAID_CONTEXT_G35 raid_context_g35;
134}; 247};
135 248
136#define RAID_CTX_SPANARM_ARM_SHIFT (0) 249#define RAID_CTX_SPANARM_ARM_SHIFT (0)
@@ -139,6 +252,14 @@ struct RAID_CONTEXT {
139#define RAID_CTX_SPANARM_SPAN_SHIFT (5) 252#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
140#define RAID_CTX_SPANARM_SPAN_MASK (0xE0) 253#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
141 254
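span_arm packs span[7:5] and arm[4:0] into one byte, which is also what lets megasas_prepare_secondRaid1_IO address the R1 peer arm by adding 1. A sketch of the packing; RAID_CTX_SPANARM_ARM_MASK is not visible in this hunk, so the 0x1F value below is an assumption read off the arm[4:0] comment:

```c
#include <assert.h>
#include <stdint.h>

#define RAID_CTX_SPANARM_ARM_SHIFT	0
#define RAID_CTX_SPANARM_ARM_MASK	0x1F	/* assumed: arm[4:0] */
#define RAID_CTX_SPANARM_SPAN_SHIFT	5
#define RAID_CTX_SPANARM_SPAN_MASK	0xE0	/* span[7:5] */

static uint8_t pack_span_arm(uint8_t span, uint8_t arm)
{
	return (uint8_t)(((span << RAID_CTX_SPANARM_SPAN_SHIFT) &
			  RAID_CTX_SPANARM_SPAN_MASK) |
			 ((arm << RAID_CTX_SPANARM_ARM_SHIFT) &
			  RAID_CTX_SPANARM_ARM_MASK));
}

int main(void)
{
	uint8_t sa = pack_span_arm(3, 17);

	assert(((sa & RAID_CTX_SPANARM_SPAN_MASK) >>
		RAID_CTX_SPANARM_SPAN_SHIFT) == 3);
	assert((sa & RAID_CTX_SPANARM_ARM_MASK) == 17);
	return 0;
}
```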
255/* number of bits per index in U32 TrackStream */
256#define BITS_PER_INDEX_STREAM 4
257#define INVALID_STREAM_NUM 16
258#define MR_STREAM_BITMAP 0x76543210
259#define STREAM_MASK ((1 << BITS_PER_INDEX_STREAM) - 1)
260#define ZERO_LAST_STREAM 0x0fffffff
261#define MAX_STREAMS_TRACKED 8
262
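MR_STREAM_BITMAP seeds an MRU list of eight 4-bit stream indices (0x76543210: index 0 most recent, index 7 least recent). Promoting a stream to the front means cutting its nibble out and shifting the more-recent entries back one slot. The helper below is one plausible update consistent with these defines; the driver's own update code is not part of this hunk:

```c
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_INDEX_STREAM	4
#define MR_STREAM_BITMAP	0x76543210
#define STREAM_MASK		((1 << BITS_PER_INDEX_STREAM) - 1)

/*
 * Promote the stream index held at MRU position 'pos' to the front:
 * entries more recent than it each shift back one 4-bit slot.
 */
static uint32_t mru_promote(uint32_t map, unsigned int pos)
{
	uint32_t idx = (map >> (pos * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
	uint32_t below = map & ((1u << (pos * BITS_PER_INDEX_STREAM)) - 1);
	uint64_t above_mask =
		~((((uint64_t)1) << ((pos + 1) * BITS_PER_INDEX_STREAM)) - 1);

	return (uint32_t)(map & above_mask) |
	       (below << BITS_PER_INDEX_STREAM) | idx;
}

int main(void)
{
	uint32_t map = MR_STREAM_BITMAP;

	map = mru_promote(map, 5);	/* stream 5 becomes most recent */
	printf("0x%08x\n", map);	/* prints 0x76432105 */
	return 0;
}
```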
142/* 263/*
143 * define region lock types 264 * define region lock types
144 */ 265 */
@@ -175,6 +296,8 @@ enum REGION_TYPE {
175#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) 296#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
176#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) 297#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
177#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) 298#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
299/* EEDP escape mode */
300#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
178#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ 301#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
179#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) 302#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
180#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03) 303#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03)
@@ -407,7 +530,7 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
407 u8 LUN[8]; /* 0x34 */ 530 u8 LUN[8]; /* 0x34 */
408 __le32 Control; /* 0x3C */ 531 __le32 Control; /* 0x3C */
409 union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ 532 union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
410 struct RAID_CONTEXT RaidContext; /* 0x60 */ 533 union RAID_CONTEXT_UNION RaidContext; /* 0x60 */
411 union MPI2_SGE_IO_UNION SGL; /* 0x80 */ 534 union MPI2_SGE_IO_UNION SGL; /* 0x80 */
412}; 535};
413 536
@@ -563,7 +686,7 @@ struct MPI2_IOC_INIT_REQUEST {
563 __le16 HeaderVersion; /* 0x0E */ 686 __le16 HeaderVersion; /* 0x0E */
564 u32 Reserved5; /* 0x10 */ 687 u32 Reserved5; /* 0x10 */
565 __le16 Reserved6; /* 0x14 */ 688 __le16 Reserved6; /* 0x14 */
566 u8 Reserved7; /* 0x16 */ 689 u8 HostPageSize; /* 0x16 */
567 u8 HostMSIxVectors; /* 0x17 */ 690 u8 HostMSIxVectors; /* 0x17 */
568 __le16 Reserved8; /* 0x18 */ 691 __le16 Reserved8; /* 0x18 */
569 __le16 SystemRequestFrameSize; /* 0x1A */ 692 __le16 SystemRequestFrameSize; /* 0x1A */
@@ -579,6 +702,7 @@ struct MPI2_IOC_INIT_REQUEST {
579 702
580/* mrpriv defines */ 703/* mrpriv defines */
581#define MR_PD_INVALID 0xFFFF 704#define MR_PD_INVALID 0xFFFF
705#define MR_DEVHANDLE_INVALID 0xFFFF
582#define MAX_SPAN_DEPTH 8 706#define MAX_SPAN_DEPTH 8
583#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH 707#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
584#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH) 708#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
@@ -586,16 +710,20 @@ struct MPI2_IOC_INIT_REQUEST {
586#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE) 710#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
587#define MAX_LOGICAL_DRIVES 64 711#define MAX_LOGICAL_DRIVES 64
588#define MAX_LOGICAL_DRIVES_EXT 256 712#define MAX_LOGICAL_DRIVES_EXT 256
713#define MAX_LOGICAL_DRIVES_DYN 512
589#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES) 714#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
590#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES) 715#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
591#define MAX_ARRAYS 128 716#define MAX_ARRAYS 128
592#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS) 717#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
593#define MAX_ARRAYS_EXT 256 718#define MAX_ARRAYS_EXT 256
594#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT) 719#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
720#define MAX_API_ARRAYS_DYN 512
595#define MAX_PHYSICAL_DEVICES 256 721#define MAX_PHYSICAL_DEVICES 256
596#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) 722#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
723#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
597#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 724#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
598#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102 725#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
726#define MR_DCMD_DRV_GET_TARGET_PROP 0x0200e103
599#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/ 727#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
600#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200 728#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
601#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200 729#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
@@ -603,7 +731,7 @@ struct MPI2_IOC_INIT_REQUEST {
603struct MR_DEV_HANDLE_INFO { 731struct MR_DEV_HANDLE_INFO {
604 __le16 curDevHdl; 732 __le16 curDevHdl;
605 u8 validHandles; 733 u8 validHandles;
606 u8 reserved; 734 u8 interfaceType;
607 __le16 devHandle[2]; 735 __le16 devHandle[2];
608}; 736};
609 737
@@ -640,10 +768,56 @@ struct MR_SPAN_BLOCK_INFO {
640 struct MR_SPAN_INFO block_span_info; 768 struct MR_SPAN_INFO block_span_info;
641}; 769};
642 770
771#define MR_RAID_CTX_CPUSEL_0 0
772#define MR_RAID_CTX_CPUSEL_1 1
773#define MR_RAID_CTX_CPUSEL_2 2
774#define MR_RAID_CTX_CPUSEL_3 3
775#define MR_RAID_CTX_CPUSEL_FCFS 0xF
776
777struct MR_CPU_AFFINITY_MASK {
778 union {
779 struct {
780#ifndef MFI_BIG_ENDIAN
781 u8 hw_path:1;
782 u8 cpu0:1;
783 u8 cpu1:1;
784 u8 cpu2:1;
785 u8 cpu3:1;
786 u8 reserved:3;
787#else
788 u8 reserved:3;
789 u8 cpu3:1;
790 u8 cpu2:1;
791 u8 cpu1:1;
792 u8 cpu0:1;
793 u8 hw_path:1;
794#endif
795 };
796 u8 core_mask;
797 };
798};
799
800struct MR_IO_AFFINITY {
801 union {
802 struct {
803 struct MR_CPU_AFFINITY_MASK pdRead;
804 struct MR_CPU_AFFINITY_MASK pdWrite;
805 struct MR_CPU_AFFINITY_MASK ldRead;
806 struct MR_CPU_AFFINITY_MASK ldWrite;
807 };
808 u32 word;
809 };
810 u8 maxCores; /* Total cores + HW Path in ROC */
811 u8 reserved[3];
812};
813
643struct MR_LD_RAID { 814struct MR_LD_RAID {
644 struct { 815 struct {
645#if defined(__BIG_ENDIAN_BITFIELD) 816#if defined(__BIG_ENDIAN_BITFIELD)
646 u32 reserved4:5; 817 u32 reserved4:2;
818 u32 fp_cache_bypass_capable:1;
819 u32 fp_rmw_capable:1;
820 u32 disable_coalescing:1;
647 u32 fpBypassRegionLock:1; 821 u32 fpBypassRegionLock:1;
648 u32 tmCapable:1; 822 u32 tmCapable:1;
649 u32 fpNonRWCapable:1; 823 u32 fpNonRWCapable:1;
@@ -654,11 +828,13 @@ struct MR_LD_RAID {
654 u32 encryptionType:8; 828 u32 encryptionType:8;
655 u32 pdPiMode:4; 829 u32 pdPiMode:4;
656 u32 ldPiMode:4; 830 u32 ldPiMode:4;
657 u32 reserved5:3; 831 u32 reserved5:2;
832 u32 ra_capable:1;
658 u32 fpCapable:1; 833 u32 fpCapable:1;
659#else 834#else
660 u32 fpCapable:1; 835 u32 fpCapable:1;
661 u32 reserved5:3; 836 u32 ra_capable:1;
837 u32 reserved5:2;
662 u32 ldPiMode:4; 838 u32 ldPiMode:4;
663 u32 pdPiMode:4; 839 u32 pdPiMode:4;
664 u32 encryptionType:8; 840 u32 encryptionType:8;
@@ -669,7 +845,10 @@ struct MR_LD_RAID {
669 u32 fpNonRWCapable:1; 845 u32 fpNonRWCapable:1;
670 u32 tmCapable:1; 846 u32 tmCapable:1;
671 u32 fpBypassRegionLock:1; 847 u32 fpBypassRegionLock:1;
672 u32 reserved4:5; 848 u32 disable_coalescing:1;
849 u32 fp_rmw_capable:1;
850 u32 fp_cache_bypass_capable:1;
851 u32 reserved4:2;
673#endif 852#endif
674 } capability; 853 } capability;
675 __le32 reserved6; 854 __le32 reserved6;
@@ -696,7 +875,36 @@ struct MR_LD_RAID {
696 875
697 u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */ 876 u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
698 u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/ 877 u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
699 u8 reserved3[0x80-0x2D]; /* 0x2D */ 878 /* Ox2D This LD accept priority boost of this type */
879 u8 ld_accept_priority_type;
880 u8 reserved2[2]; /* 0x2E - 0x2F */
881 /* 0x30 - 0x33, Logical block size for the LD */
882 u32 logical_block_length;
883 struct {
884#ifndef MFI_BIG_ENDIAN
885 /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
886 u32 ld_pi_exp:4;
887 /* 0x34, LOGICAL BLOCKS PER PHYSICAL
888 * BLOCK EXPONENT from READ CAPACITY 16
889 */
890 u32 ld_logical_block_exp:4;
891 u32 reserved1:24; /* 0x34 */
892#else
893 u32 reserved1:24; /* 0x34 */
894 /* 0x34, LOGICAL BLOCKS PER PHYSICAL
895 * BLOCK EXPONENT from READ CAPACITY 16
896 */
897 u32 ld_logical_block_exp:4;
898 /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
899 u32 ld_pi_exp:4;
900#endif
901 }; /* 0x34 - 0x37 */
902 /* 0x38 - 0x3f, This will determine which
903 * core will process LD IO and PD IO.
904 */
905 struct MR_IO_AFFINITY cpuAffinity;
 906 /* Bit definitions are specified by MR_IO_AFFINITY */
907 u8 reserved3[0x80 - 0x40]; /* 0x40 - 0x7f */
700}; 908};
701 909
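The new ld_logical_block_exp field follows READ CAPACITY(16) semantics: logical blocks per physical block is a power of two, so the physical block size is the logical block length shifted left by the exponent. A quick check of that arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

/* physical block size = logical block length << logical-per-physical exponent */
static uint32_t physical_block_len(uint32_t logical_block_length,
				   uint32_t ld_logical_block_exp)
{
	return logical_block_length << ld_logical_block_exp;
}

int main(void)
{
	/* 512-byte logical blocks, 2^3 = 8 logical per physical -> 4096 */
	printf("%u\n", physical_block_len(512, 3));
	return 0;
}
```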
702struct MR_LD_SPAN_MAP { 910struct MR_LD_SPAN_MAP {
@@ -735,6 +943,7 @@ struct IO_REQUEST_INFO {
735 u16 ldTgtId; 943 u16 ldTgtId;
736 u8 isRead; 944 u8 isRead;
737 __le16 devHandle; 945 __le16 devHandle;
946 u8 pd_interface;
738 u64 pdBlock; 947 u64 pdBlock;
739 u8 fpOkForIo; 948 u8 fpOkForIo;
740 u8 IoforUnevenSpan; 949 u8 IoforUnevenSpan;
@@ -743,6 +952,8 @@ struct IO_REQUEST_INFO {
743 u64 start_row; 952 u64 start_row;
744 u8 span_arm; /* span[7:5], arm[4:0] */ 953 u8 span_arm; /* span[7:5], arm[4:0] */
745 u8 pd_after_lb; 954 u8 pd_after_lb;
955 u16 r1_alt_dev_handle; /* raid 1/10 only */
956 bool ra_capable;
746}; 957};
747 958
748struct MR_LD_TARGET_SYNC { 959struct MR_LD_TARGET_SYNC {
@@ -751,6 +962,91 @@ struct MR_LD_TARGET_SYNC {
751 __le16 seqNum; 962 __le16 seqNum;
752}; 963};
753 964
965/*
966 * RAID Map descriptor Types.
 967 * Each element should uniquely identify one data structure in the RAID map
968 */
969enum MR_RAID_MAP_DESC_TYPE {
970 /* MR_DEV_HANDLE_INFO data */
971 RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0,
972 /* target to Ld num Index map */
973 RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1,
974 /* MR_ARRAY_INFO data */
975 RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2,
976 /* MR_LD_SPAN_MAP data */
977 RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3,
978 RAID_MAP_DESC_TYPE_COUNT,
979};
980
981/*
982 * This table defines the offset, size and num elements of each descriptor
983 * type in the RAID Map buffer
984 */
985struct MR_RAID_MAP_DESC_TABLE {
986 /* Raid map descriptor type */
987 u32 raid_map_desc_type;
988 /* Offset into the RAID map buffer where
989 * descriptor data is saved
990 */
991 u32 raid_map_desc_offset;
992 /* total size of the
993 * descriptor buffer
994 */
995 u32 raid_map_desc_buffer_size;
996 /* Number of elements contained in the
997 * descriptor buffer
998 */
999 u32 raid_map_desc_elements;
1000};
1001
1002/*
1003 * Dynamic Raid Map Structure.
1004 */
1005struct MR_FW_RAID_MAP_DYNAMIC {
1006 u32 raid_map_size; /* total size of RAID Map structure */
1007 u32 desc_table_offset;/* Offset of desc table into RAID map*/
1008 u32 desc_table_size; /* Total Size of desc table */
1009 /* Total Number of elements in the desc table */
1010 u32 desc_table_num_elements;
1011 u64 reserved1;
1012 u32 reserved2[3]; /*future use */
1013 /* timeout value used by driver in FP IOs */
1014 u8 fp_pd_io_timeout_sec;
1015 u8 reserved3[3];
1016 /* when this seqNum increments, driver needs to
1017 * release RMW buffers asap
1018 */
1019 u32 rmw_fp_seq_num;
1020 u16 ld_count; /* count of lds. */
1021 u16 ar_count; /* count of arrays */
1022 u16 span_count; /* count of spans */
1023 u16 reserved4[3];
1024/*
1025 * The below structure of pointers is only to be used by the driver.
 1026 * This is added to the API to reduce the amount of code changes
 1027 * needed in the driver to support the dynamic RAID map. Firmware should
 1028 * not update these pointers while preparing the RAID map.
1029 */
1030 union {
1031 struct {
1032 struct MR_DEV_HANDLE_INFO *dev_hndl_info;
1033 u16 *ld_tgt_id_to_ld;
1034 struct MR_ARRAY_INFO *ar_map_info;
1035 struct MR_LD_SPAN_MAP *ld_span_map;
1036 };
1037 u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];
1038 };
1039/*
1040 * RAID Map descriptor table defines the layout of data in the RAID Map.
1041 * The size of the descriptor table itself could change.
1042 */
1043 /* Variable Size descriptor Table. */
1044 struct MR_RAID_MAP_DESC_TABLE
1045 raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
1046 /* Variable Size buffer containing all data */
1047 u32 raid_map_desc_data[1];
1048}; /* Dynamicaly sized RAID MAp structure */
1049
754#define IEEE_SGE_FLAGS_ADDR_MASK (0x03) 1050#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
755#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) 1051#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
756#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) 1052#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
@@ -759,6 +1055,16 @@ struct MR_LD_TARGET_SYNC {
759#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) 1055#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
760#define IEEE_SGE_FLAGS_END_OF_LIST (0x40) 1056#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
761 1057
1058#define MPI2_SGE_FLAGS_SHIFT (0x02)
1059#define IEEE_SGE_FLAGS_FORMAT_MASK (0xC0)
1060#define IEEE_SGE_FLAGS_FORMAT_IEEE (0x00)
1061#define IEEE_SGE_FLAGS_FORMAT_NVME (0x02)
1062
1063#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
1064#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
1065#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
1066#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
1067
762struct megasas_register_set; 1068struct megasas_register_set;
763struct megasas_instance; 1069struct megasas_instance;
764 1070
@@ -795,6 +1101,10 @@ struct megasas_cmd_fusion {
795 u32 index; 1101 u32 index;
796 u8 pd_r1_lb; 1102 u8 pd_r1_lb;
797 struct completion done; 1103 struct completion done;
1104 u8 pd_interface;
1105 u16 r1_alt_dev_handle; /* raid 1/10 only*/
1106 bool cmd_completed; /* raid 1/10 fp writes status holder */
1107
798}; 1108};
799 1109
800struct LD_LOAD_BALANCE_INFO { 1110struct LD_LOAD_BALANCE_INFO {
@@ -856,9 +1166,10 @@ struct MR_DRV_RAID_MAP {
856 __le16 spanCount; 1166 __le16 spanCount;
857 __le16 reserve3; 1167 __le16 reserve3;
858 1168
859 struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; 1169 struct MR_DEV_HANDLE_INFO
860 u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; 1170 devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
861 struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT]; 1171 u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
1172 struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
862 struct MR_LD_SPAN_MAP ldSpanMap[1]; 1173 struct MR_LD_SPAN_MAP ldSpanMap[1];
863 1174
864}; 1175};
@@ -870,7 +1181,7 @@ struct MR_DRV_RAID_MAP {
870struct MR_DRV_RAID_MAP_ALL { 1181struct MR_DRV_RAID_MAP_ALL {
871 1182
872 struct MR_DRV_RAID_MAP raidMap; 1183 struct MR_DRV_RAID_MAP raidMap;
873 struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1]; 1184 struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
874} __packed; 1185} __packed;
875 1186
876 1187
@@ -919,7 +1230,8 @@ struct MR_PD_CFG_SEQ {
919 u8 reserved:7; 1230 u8 reserved:7;
920#endif 1231#endif
921 } capability; 1232 } capability;
922 u8 reserved[3]; 1233 u8 reserved;
1234 u16 pd_target_id;
923} __packed; 1235} __packed;
924 1236
925struct MR_PD_CFG_SEQ_NUM_SYNC { 1237struct MR_PD_CFG_SEQ_NUM_SYNC {
@@ -928,6 +1240,30 @@ struct MR_PD_CFG_SEQ_NUM_SYNC {
928 struct MR_PD_CFG_SEQ seq[1]; 1240 struct MR_PD_CFG_SEQ seq[1];
929} __packed; 1241} __packed;
930 1242
1243/* stream detection */
1244struct STREAM_DETECT {
1245 u64 next_seq_lba; /* next LBA to match sequential access */
1246 struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */
1247 struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */
1248 u32 count_cmds_in_stream; /* count of host commands in this stream */
1249 u16 num_sges_in_group; /* total number of SGEs in grouped IOs */
1250 u8 is_read; /* SCSI OpCode for this stream */
1251 u8 group_depth; /* total number of host commands in group */
1252 /* TRUE if cannot add any more commands to this group */
1253 bool group_flush;
1254 u8 reserved[7]; /* pad to 64-bit alignment */
1255};
1256
1257struct LD_STREAM_DETECT {
1258 bool write_back; /* TRUE if WB, FALSE if WT */
1259 bool fp_write_enabled;
1260 bool members_ssds;
1261 bool fp_cache_bypass_capable;
 1262 u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indices */
1263 /* this is the array of stream detect structures (one per stream) */
1264 struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED];
1265};
1266
931struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { 1267struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
932 u64 RDPQBaseAddress; 1268 u64 RDPQBaseAddress;
933 u32 Reserved1; 1269 u32 Reserved1;
@@ -965,7 +1301,7 @@ struct fusion_context {
965 u8 chain_offset_io_request; 1301 u8 chain_offset_io_request;
966 u8 chain_offset_mfi_pthru; 1302 u8 chain_offset_mfi_pthru;
967 1303
968 struct MR_FW_RAID_MAP_ALL *ld_map[2]; 1304 struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
969 dma_addr_t ld_map_phys[2]; 1305 dma_addr_t ld_map_phys[2];
970 1306
971 /*Non dma-able memory. Driver local copy.*/ 1307 /*Non dma-able memory. Driver local copy.*/
@@ -973,14 +1309,18 @@ struct fusion_context {
973 1309
974 u32 max_map_sz; 1310 u32 max_map_sz;
975 u32 current_map_sz; 1311 u32 current_map_sz;
1312 u32 old_map_sz;
1313 u32 new_map_sz;
976 u32 drv_map_sz; 1314 u32 drv_map_sz;
977 u32 drv_map_pages; 1315 u32 drv_map_pages;
978 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT]; 1316 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT];
979 dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT]; 1317 dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT];
980 u8 fast_path_io; 1318 u8 fast_path_io;
981 struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT]; 1319 struct LD_LOAD_BALANCE_INFO *load_balance_info;
1320 u32 load_balance_info_pages;
982 LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT]; 1321 LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
983 u8 adapter_type; 1322 u8 adapter_type;
1323 struct LD_STREAM_DETECT **stream_detect_by_ld;
984}; 1324};
985 1325
986union desc_value { 1326union desc_value {
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 8bae305bc156..af4be403582e 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -624,6 +624,8 @@ typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT {
624 624
625/* defines for ReasonCode field */ 625/* defines for ReasonCode field */
626#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) 626#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
627#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01)
628#define MPI26_EVENT_ACTIVE_CABLE_DEGRADED (0x02)
627 629
628/*Hard Reset Received Event data */ 630/*Hard Reset Received Event data */
629 631
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index f00ef88a378a..a3fe1fb55c17 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1040,6 +1040,25 @@ _base_interrupt(int irq, void *bus_id)
1040 reply_q->reply_post_free[reply_q->reply_post_host_index]. 1040 reply_q->reply_post_free[reply_q->reply_post_host_index].
1041 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1041 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1042 completed_cmds++; 1042 completed_cmds++;
1043 /* Update the reply post host index after continuously
1044 * processing the threshold number of Reply Descriptors.
1045 * So that FW can find enough entries to post the Reply
1046 * Descriptors in the reply descriptor post queue.
1047 */
1048 if (completed_cmds > ioc->hba_queue_depth/3) {
1049 if (ioc->combined_reply_queue) {
1050 writel(reply_q->reply_post_host_index |
1051 ((msix_index & 7) <<
1052 MPI2_RPHI_MSIX_INDEX_SHIFT),
1053 ioc->replyPostRegisterIndex[msix_index/8]);
1054 } else {
1055 writel(reply_q->reply_post_host_index |
1056 (msix_index <<
1057 MPI2_RPHI_MSIX_INDEX_SHIFT),
1058 &ioc->chip->ReplyPostHostIndex);
1059 }
1060 completed_cmds = 1;
1061 }
1043 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1062 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1044 goto out; 1063 goto out;
1045 if (!reply_q->reply_post_host_index) 1064 if (!reply_q->reply_post_host_index)
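
For context, the hunk above publishes the reply-post host index part-way through a completion burst rather than only at loop exit, so the firmware regains free reply-descriptor slots while a long burst is still being drained. A minimal user-space sketch of the same throttling idea, with an illustrative ring depth (the driver's real threshold is hba_queue_depth/3 and the publish step is a writel()):

#include <stdio.h>

#define QUEUE_DEPTH 96                     /* illustrative ring size */

static unsigned int host_index;            /* last consumer index published */

static void publish_host_index(unsigned int idx)
{
	host_index = idx;                  /* stands in for the writel() */
	printf("published host index %u\n", idx);
}

int main(void)
{
	unsigned int idx = 0, completed = 0;
	int n_events = 300;                /* pretend 300 completions arrive */

	while (n_events--) {
		idx = (idx + 1) % QUEUE_DEPTH;  /* consume one descriptor */
		completed++;
		/* Publish early once a third of the ring has been drained
		 * in one burst, so the producer regains slots mid-burst. */
		if (completed > QUEUE_DEPTH / 3) {
			publish_host_index(idx);
			completed = 1;
		}
	}
	publish_host_index(idx);           /* final publish at loop exit */
	return 0;
}
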
@@ -5522,6 +5541,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5522 goto out_free_resources; 5541 goto out_free_resources;
5523 5542
5524 ioc->non_operational_loop = 0; 5543 ioc->non_operational_loop = 0;
5544 ioc->got_task_abort_from_ioctl = 0;
5525 return 0; 5545 return 0;
5526 5546
5527 out_free_resources: 5547 out_free_resources:
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index dcb33f4fa687..4ab634fc27df 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,9 +73,9 @@
73#define MPT3SAS_DRIVER_NAME "mpt3sas" 73#define MPT3SAS_DRIVER_NAME "mpt3sas"
74#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 74#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
75#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 75#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
76#define MPT3SAS_DRIVER_VERSION "14.101.00.00" 76#define MPT3SAS_DRIVER_VERSION "15.100.00.00"
77#define MPT3SAS_MAJOR_VERSION 14 77#define MPT3SAS_MAJOR_VERSION 15
78#define MPT3SAS_MINOR_VERSION 101 78#define MPT3SAS_MINOR_VERSION 100
79#define MPT3SAS_BUILD_VERSION 0 79#define MPT3SAS_BUILD_VERSION 0
80#define MPT3SAS_RELEASE_VERSION 00 80#define MPT3SAS_RELEASE_VERSION 00
81 81
@@ -1000,6 +1000,7 @@ struct MPT3SAS_ADAPTER {
1000 u8 broadcast_aen_busy; 1000 u8 broadcast_aen_busy;
1001 u16 broadcast_aen_pending; 1001 u16 broadcast_aen_pending;
1002 u8 shost_recovery; 1002 u8 shost_recovery;
1003 u8 got_task_abort_from_ioctl;
1003 1004
1004 struct mutex reset_in_progress_mutex; 1005 struct mutex reset_in_progress_mutex;
1005 spinlock_t ioc_reset_in_progress_lock; 1006 spinlock_t ioc_reset_in_progress_lock;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 95f0f24bac05..02fe1c4aae2f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -826,16 +826,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
826 "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n", 826 "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
827 ioc->name, 827 ioc->name,
828 le16_to_cpu(tm_request->DevHandle), tm_request->TaskType)); 828 le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
829 829 ioc->got_task_abort_from_ioctl = 1;
830 if (tm_request->TaskType == 830 if (tm_request->TaskType ==
831 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || 831 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
832 tm_request->TaskType == 832 tm_request->TaskType ==
833 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { 833 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
834 if (_ctl_set_task_mid(ioc, &karg, tm_request)) { 834 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
835 mpt3sas_base_free_smid(ioc, smid); 835 mpt3sas_base_free_smid(ioc, smid);
836 ioc->got_task_abort_from_ioctl = 0;
836 goto out; 837 goto out;
837 } 838 }
838 } 839 }
840 ioc->got_task_abort_from_ioctl = 0;
839 841
840 if (test_bit(device_handle, ioc->device_remove_in_progress)) { 842 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
841 dtmprintk(ioc, pr_info(MPT3SAS_FMT 843 dtmprintk(ioc, pr_info(MPT3SAS_FMT
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c6d550551504..46e866c36c8a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1075,6 +1075,26 @@ _scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1075} 1075}
1076 1076
1077/** 1077/**
1078 * __scsih_scsi_lookup_get_clear - returns scmd entry without
1079 * holding any lock.
1080 * @ioc: per adapter object
1081 * @smid: system request message index
1082 *
 1083 * Returns the scmd pointer stored at @smid and clears the
 1084 * stored pointer, without taking the scsi_lookup lock.
1085 */
1086static inline struct scsi_cmnd *
1087__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc,
1088 u16 smid)
1089{
1090 struct scsi_cmnd *scmd = NULL;
1091
1092 swap(scmd, ioc->scsi_lookup[smid - 1].scmd);
1093
1094 return scmd;
1095}
1096
1097/**
1078 * _scsih_scsi_lookup_get_clear - returns scmd entry 1098 * _scsih_scsi_lookup_get_clear - returns scmd entry
1079 * @ioc: per adapter object 1099 * @ioc: per adapter object
1080 * @smid: system request message index 1100 * @smid: system request message index
@@ -1089,8 +1109,7 @@ _scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1089 struct scsi_cmnd *scmd; 1109 struct scsi_cmnd *scmd;
1090 1110
1091 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1111 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1092 scmd = ioc->scsi_lookup[smid - 1].scmd; 1112 scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
1093 ioc->scsi_lookup[smid - 1].scmd = NULL;
1094 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1113 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1095 1114
1096 return scmd; 1115 return scmd;
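
The new __scsih_scsi_lookup_get_clear() uses swap() to read and clear the stored pointer in one step, which is what lets _scsih_io_done() skip the scsi_lookup spinlock on its fast path below; that is safe only because a single context touches the slot when no aborts or error recovery are in flight (swap() itself is not atomic). A stand-alone sketch of the fetch-and-clear pattern, with a local swap macro standing in for the kernel's (GCC typeof assumed):

#include <stdio.h>

#define swap(a, b) do { typeof(a) __t = (a); (a) = (b); (b) = __t; } while (0)

struct cmd { int tag; };

static struct cmd *lookup[4];      /* stands in for ioc->scsi_lookup[].scmd */

/* Fetch the entry for one slot and leave NULL behind, in one exchange. */
static struct cmd *lookup_get_clear(unsigned int smid)
{
	struct cmd *c = NULL;

	swap(c, lookup[smid - 1]);
	return c;
}

int main(void)
{
	struct cmd a = { .tag = 42 };

	lookup[0] = &a;
	printf("first  get: %p\n", (void *)lookup_get_clear(1)); /* &a   */
	printf("second get: %p\n", (void *)lookup_get_clear(1)); /* NULL */
	return 0;
}
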
@@ -4661,7 +4680,13 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4661 unsigned int sector_sz; 4680 unsigned int sector_sz;
4662 4681
4663 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 4682 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
4664 scmd = _scsih_scsi_lookup_get_clear(ioc, smid); 4683
4684 if (ioc->broadcast_aen_busy || ioc->pci_error_recovery ||
4685 ioc->got_task_abort_from_ioctl)
4686 scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
4687 else
4688 scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
4689
4665 if (scmd == NULL) 4690 if (scmd == NULL)
4666 return 1; 4691 return 1;
4667 4692
@@ -8044,15 +8069,24 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
8044 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: 8069 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
8045 ActiveCableEventData = 8070 ActiveCableEventData =
8046 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; 8071 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
8047 if (ActiveCableEventData->ReasonCode == 8072 switch (ActiveCableEventData->ReasonCode) {
8048 MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) { 8073 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
8049 pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", 8074 pr_notice(MPT3SAS_FMT "Receptacle ID %d: This active cable"
8050 ioc->name, ActiveCableEventData->ReceptacleID); 8075 " requires %d mW of power\n", ioc->name,
8051 pr_info("cannot be powered and devices connected to this active cable"); 8076 ActiveCableEventData->ReceptacleID,
8052 pr_info("will not be seen. This active cable"); 8077 ActiveCableEventData->ActiveCablePowerRequirement);
8053 pr_info("requires %d mW of power", 8078 pr_notice(MPT3SAS_FMT "Receptacle ID %d: Devices connected"
8054 ActiveCableEventData->ActiveCablePowerRequirement); 8079 " to this active cable will not be seen\n",
8080 ioc->name, ActiveCableEventData->ReceptacleID);
8081 break;
8082
8083 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
 8084 pr_notice(MPT3SAS_FMT "Receptacle ID %d: This cable is not"
 8085 " running at optimal speed (12 Gb/s)\n",
 8086 ioc->name, ActiveCableEventData->ReceptacleID);
8087 break;
8055 } 8088 }
8089
8056 break; 8090 break;
8057 8091
8058 default: /* ignore the rest */ 8092 default: /* ignore the rest */
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 39285070f3b5..247df5e79b71 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2225,15 +2225,12 @@ static struct scsi_host_template mvumi_template = {
2225 .name = "Marvell Storage Controller", 2225 .name = "Marvell Storage Controller",
2226 .slave_configure = mvumi_slave_configure, 2226 .slave_configure = mvumi_slave_configure,
2227 .queuecommand = mvumi_queue_command, 2227 .queuecommand = mvumi_queue_command,
2228 .eh_timed_out = mvumi_timed_out,
2228 .eh_host_reset_handler = mvumi_host_reset, 2229 .eh_host_reset_handler = mvumi_host_reset,
2229 .bios_param = mvumi_bios_param, 2230 .bios_param = mvumi_bios_param,
2230 .this_id = -1, 2231 .this_id = -1,
2231}; 2232};
2232 2233
2233static struct scsi_transport_template mvumi_transport_template = {
2234 .eh_timed_out = mvumi_timed_out,
2235};
2236
2237static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) 2234static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2238{ 2235{
2239 void *base = NULL; 2236 void *base = NULL;
@@ -2451,7 +2448,6 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
2451 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; 2448 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2452 host->max_id = mhba->max_target_id; 2449 host->max_id = mhba->max_target_id;
2453 host->max_cmd_len = MAX_COMMAND_SIZE; 2450 host->max_cmd_len = MAX_COMMAND_SIZE;
2454 host->transportt = &mvumi_transport_template;
2455 2451
2456 ret = scsi_add_host(host, &mhba->pdev->dev); 2452 ret = scsi_add_host(host, &mhba->pdev->dev);
2457 if (ret) { 2453 if (ret) {
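
mvumi had been attaching a one-field scsi_transport_template purely to install a timeout handler; the hunks above drop that and use the .eh_timed_out slot in the host template instead (the scsi_times_out() change further down in this series is what makes that slot authoritative). A trimmed sketch of the new wiring, with placeholder names and all other template fields elided:

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static enum blk_eh_timer_return demo_timed_out(struct scsi_cmnd *scmd)
{
	return BLK_EH_NOT_HANDLED;	/* fall through to normal recovery */
}

/* The timeout hook now lives in the host template itself, so no transport
 * template (and no host->transportt assignment) is needed for it. */
static struct scsi_host_template demo_template = {
	.name		= "demo",
	.eh_timed_out	= demo_timed_out,
};
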
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 9fc675f57e33..417368ccb686 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -888,7 +888,6 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
888 u32 i = 0, j = 0; 888 u32 i = 0, j = 0;
889 u32 number_of_intr; 889 u32 number_of_intr;
890 int flag = 0; 890 int flag = 0;
891 u32 max_entry;
892 int rc; 891 int rc;
893 static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3]; 892 static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
894 893
@@ -900,18 +899,14 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
900 flag &= ~IRQF_SHARED; 899 flag &= ~IRQF_SHARED;
901 } 900 }
902 901
903 max_entry = sizeof(pm8001_ha->msix_entries) / 902 rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
904 sizeof(pm8001_ha->msix_entries[0]); 903 number_of_intr, PCI_IRQ_MSIX);
905 for (i = 0; i < max_entry ; i++) 904 if (rc < 0)
906 pm8001_ha->msix_entries[i].entry = i;
907 rc = pci_enable_msix_exact(pm8001_ha->pdev, pm8001_ha->msix_entries,
908 number_of_intr);
909 pm8001_ha->number_of_intr = number_of_intr;
910 if (rc)
911 return rc; 905 return rc;
906 pm8001_ha->number_of_intr = number_of_intr;
912 907
913 PM8001_INIT_DBG(pm8001_ha, pm8001_printk( 908 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
914 "pci_enable_msix_exact request ret:%d no of intr %d\n", 909 "pci_alloc_irq_vectors request ret:%d no of intr %d\n",
915 rc, pm8001_ha->number_of_intr)); 910 rc, pm8001_ha->number_of_intr));
916 911
917 for (i = 0; i < number_of_intr; i++) { 912 for (i = 0; i < number_of_intr; i++) {
@@ -920,15 +915,15 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
920 pm8001_ha->irq_vector[i].irq_id = i; 915 pm8001_ha->irq_vector[i].irq_id = i;
921 pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; 916 pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
922 917
923 rc = request_irq(pm8001_ha->msix_entries[i].vector, 918 rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i),
924 pm8001_interrupt_handler_msix, flag, 919 pm8001_interrupt_handler_msix, flag,
925 intr_drvname[i], &(pm8001_ha->irq_vector[i])); 920 intr_drvname[i], &(pm8001_ha->irq_vector[i]));
926 if (rc) { 921 if (rc) {
927 for (j = 0; j < i; j++) { 922 for (j = 0; j < i; j++) {
 928 free_irq(pm8001_ha->msix_entries[j].vector, 923 free_irq(pci_irq_vector(pm8001_ha->pdev, j),
 929 &(pm8001_ha->irq_vector[i])); 924 &(pm8001_ha->irq_vector[j]));
930 } 925 }
931 pci_disable_msix(pm8001_ha->pdev); 926 pci_free_irq_vectors(pm8001_ha->pdev);
932 break; 927 break;
933 } 928 }
934 } 929 }
@@ -1102,11 +1097,10 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
1102 1097
1103#ifdef PM8001_USE_MSIX 1098#ifdef PM8001_USE_MSIX
1104 for (i = 0; i < pm8001_ha->number_of_intr; i++) 1099 for (i = 0; i < pm8001_ha->number_of_intr; i++)
1105 synchronize_irq(pm8001_ha->msix_entries[i].vector); 1100 synchronize_irq(pci_irq_vector(pdev, i));
1106 for (i = 0; i < pm8001_ha->number_of_intr; i++) 1101 for (i = 0; i < pm8001_ha->number_of_intr; i++)
1107 free_irq(pm8001_ha->msix_entries[i].vector, 1102 free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
1108 &(pm8001_ha->irq_vector[i])); 1103 pci_free_irq_vectors(pdev);
1109 pci_disable_msix(pdev);
1110#else 1104#else
1111 free_irq(pm8001_ha->irq, sha); 1105 free_irq(pm8001_ha->irq, sha);
1112#endif 1106#endif
@@ -1152,11 +1146,10 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1152 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); 1146 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
1153#ifdef PM8001_USE_MSIX 1147#ifdef PM8001_USE_MSIX
1154 for (i = 0; i < pm8001_ha->number_of_intr; i++) 1148 for (i = 0; i < pm8001_ha->number_of_intr; i++)
1155 synchronize_irq(pm8001_ha->msix_entries[i].vector); 1149 synchronize_irq(pci_irq_vector(pdev, i));
1156 for (i = 0; i < pm8001_ha->number_of_intr; i++) 1150 for (i = 0; i < pm8001_ha->number_of_intr; i++)
1157 free_irq(pm8001_ha->msix_entries[i].vector, 1151 free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
1158 &(pm8001_ha->irq_vector[i])); 1152 pci_free_irq_vectors(pdev);
1159 pci_disable_msix(pdev);
1160#else 1153#else
1161 free_irq(pm8001_ha->irq, sha); 1154 free_irq(pm8001_ha->irq, sha);
1162#endif 1155#endif
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6628cc38316c..e81a8fa7ef1a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -521,8 +521,6 @@ struct pm8001_hba_info {
521 struct pm8001_device *devices; 521 struct pm8001_device *devices;
522 struct pm8001_ccb_info *ccb_info; 522 struct pm8001_ccb_info *ccb_info;
523#ifdef PM8001_USE_MSIX 523#ifdef PM8001_USE_MSIX
524 struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC];
525 /*for msi-x interrupt*/
526 int number_of_intr;/*will be used in remove()*/ 524 int number_of_intr;/*will be used in remove()*/
527#endif 525#endif
528#ifdef PM8001_USE_TASKLET 526#ifdef PM8001_USE_TASKLET
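
pm8001 (like snic and pmcraid below) lands on the same pci_alloc_irq_vectors()/pci_irq_vector() shape in place of an open-coded msix_entry array. A condensed sketch of that shared pattern, assuming a fixed vector count and with driver-specific details trimmed:

#include <linux/interrupt.h>
#include <linux/pci.h>

#define NR_VECS 4

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Allocate exactly NR_VECS MSI-X vectors and hook an ISR to each one. */
static int demo_setup_irqs(struct pci_dev *pdev, void *drv_data)
{
	int i, rc;

	rc = pci_alloc_irq_vectors(pdev, NR_VECS, NR_VECS, PCI_IRQ_MSIX);
	if (rc < 0)
		return rc;

	for (i = 0; i < NR_VECS; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), demo_isr, 0,
				 "demo", drv_data);
		if (rc)
			goto out_unwind;
	}
	return 0;

out_unwind:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), drv_data);
	pci_free_irq_vectors(pdev);
	return rc;
}

/* Teardown mirrors the setup: free each IRQ, then release the vectors. */
static void demo_teardown_irqs(struct pci_dev *pdev, void *drv_data)
{
	int i;

	for (i = 0; i < NR_VECS; i++) {
		synchronize_irq(pci_irq_vector(pdev, i));
		free_irq(pci_irq_vector(pdev, i), drv_data);
	}
	pci_free_irq_vectors(pdev);
}
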
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 337982cf3d63..49e70a383afa 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4587,16 +4587,14 @@ static void pmcraid_tasklet_function(unsigned long instance)
4587static 4587static
4588void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance) 4588void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
4589{ 4589{
4590 struct pci_dev *pdev = pinstance->pdev;
4590 int i; 4591 int i;
4591 4592
4592 for (i = 0; i < pinstance->num_hrrq; i++) 4593 for (i = 0; i < pinstance->num_hrrq; i++)
4593 free_irq(pinstance->hrrq_vector[i].vector, 4594 free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
4594 &(pinstance->hrrq_vector[i]));
4595 4595
4596 if (pinstance->interrupt_mode) { 4596 pinstance->interrupt_mode = 0;
4597 pci_disable_msix(pinstance->pdev); 4597 pci_free_irq_vectors(pdev);
4598 pinstance->interrupt_mode = 0;
4599 }
4600} 4598}
4601 4599
4602/** 4600/**
@@ -4609,60 +4607,52 @@ void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
4609static int 4607static int
4610pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) 4608pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
4611{ 4609{
4612 int rc;
4613 struct pci_dev *pdev = pinstance->pdev; 4610 struct pci_dev *pdev = pinstance->pdev;
4611 unsigned int irq_flag = PCI_IRQ_LEGACY, flag;
4612 int num_hrrq, rc, i;
4613 irq_handler_t isr;
4614 4614
4615 if ((pmcraid_enable_msix) && 4615 if (pmcraid_enable_msix)
4616 (pci_find_capability(pdev, PCI_CAP_ID_MSIX))) { 4616 irq_flag |= PCI_IRQ_MSIX;
4617 int num_hrrq = PMCRAID_NUM_MSIX_VECTORS;
4618 struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS];
4619 int i;
4620 for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
4621 entries[i].entry = i;
4622
4623 num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq);
4624 if (num_hrrq < 0)
4625 goto pmcraid_isr_legacy;
4626
4627 for (i = 0; i < num_hrrq; i++) {
4628 pinstance->hrrq_vector[i].hrrq_id = i;
4629 pinstance->hrrq_vector[i].drv_inst = pinstance;
4630 pinstance->hrrq_vector[i].vector = entries[i].vector;
4631 rc = request_irq(pinstance->hrrq_vector[i].vector,
4632 pmcraid_isr_msix, 0,
4633 PMCRAID_DRIVER_NAME,
4634 &(pinstance->hrrq_vector[i]));
4635
4636 if (rc) {
4637 int j;
4638 for (j = 0; j < i; j++)
4639 free_irq(entries[j].vector,
4640 &(pinstance->hrrq_vector[j]));
4641 pci_disable_msix(pdev);
4642 goto pmcraid_isr_legacy;
4643 }
4644 }
4645 4617
4646 pinstance->num_hrrq = num_hrrq; 4618 num_hrrq = pci_alloc_irq_vectors(pdev, 1, PMCRAID_NUM_MSIX_VECTORS,
4619 irq_flag);
4620 if (num_hrrq < 0)
4621 return num_hrrq;
4622
4623 if (pdev->msix_enabled) {
4624 flag = 0;
4625 isr = pmcraid_isr_msix;
4626 } else {
4627 flag = IRQF_SHARED;
4628 isr = pmcraid_isr;
4629 }
4630
4631 for (i = 0; i < num_hrrq; i++) {
4632 struct pmcraid_isr_param *vec = &pinstance->hrrq_vector[i];
4633
4634 vec->hrrq_id = i;
4635 vec->drv_inst = pinstance;
4636 rc = request_irq(pci_irq_vector(pdev, i), isr, flag,
4637 PMCRAID_DRIVER_NAME, vec);
4638 if (rc)
4639 goto out_unwind;
4640 }
4641
4642 pinstance->num_hrrq = num_hrrq;
4643 if (pdev->msix_enabled) {
4647 pinstance->interrupt_mode = 1; 4644 pinstance->interrupt_mode = 1;
4648 iowrite32(DOORBELL_INTR_MODE_MSIX, 4645 iowrite32(DOORBELL_INTR_MODE_MSIX,
4649 pinstance->int_regs.host_ioa_interrupt_reg); 4646 pinstance->int_regs.host_ioa_interrupt_reg);
4650 ioread32(pinstance->int_regs.host_ioa_interrupt_reg); 4647 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
4651 goto pmcraid_isr_out;
4652 } 4648 }
4653 4649
4654pmcraid_isr_legacy: 4650 return 0;
4655 /* If MSI-X registration failed fallback to legacy mode, where 4651
4656 * only one hrrq entry will be used 4652out_unwind:
4657 */ 4653 while (--i >= 0)
4658 pinstance->hrrq_vector[0].hrrq_id = 0; 4654 free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
4659 pinstance->hrrq_vector[0].drv_inst = pinstance; 4655 pci_free_irq_vectors(pdev);
4660 pinstance->hrrq_vector[0].vector = pdev->irq;
4661 pinstance->num_hrrq = 1;
4662
4663 rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
4664 PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
4665pmcraid_isr_out:
4666 return rc; 4656 return rc;
4667} 4657}
4668 4658
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index e1d150f3fd4d..568b18a2f47d 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -628,7 +628,6 @@ struct pmcraid_interrupts {
628/* ISR parameters LLD allocates (one for each MSI-X if enabled) vectors */ 628/* ISR parameters LLD allocates (one for each MSI-X if enabled) vectors */
629struct pmcraid_isr_param { 629struct pmcraid_isr_param {
630 struct pmcraid_instance *drv_inst; 630 struct pmcraid_instance *drv_inst;
631 u16 vector; /* allocated msi-x vector */
632 u8 hrrq_id; /* hrrq entry index */ 631 u8 hrrq_id; /* hrrq entry index */
633}; 632};
634 633
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
index 2bdedb9c39bc..8fd28b056f73 100644
--- a/drivers/scsi/qedi/qedi_dbg.c
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -52,7 +52,7 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
52 vaf.va = &va; 52 vaf.va = &va;
53 53
54 if (!(qedi_dbg_log & QEDI_LOG_WARN)) 54 if (!(qedi_dbg_log & QEDI_LOG_WARN))
55 return; 55 goto ret;
56 56
57 if (likely(qedi) && likely(qedi->pdev)) 57 if (likely(qedi) && likely(qedi->pdev))
58 pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), 58 pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
@@ -60,6 +60,7 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
60 else 60 else
61 pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); 61 pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
62 62
63ret:
63 va_end(va); 64 va_end(va);
64} 65}
65 66
@@ -80,7 +81,7 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
80 vaf.va = &va; 81 vaf.va = &va;
81 82
82 if (!(qedi_dbg_log & QEDI_LOG_NOTICE)) 83 if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
83 return; 84 goto ret;
84 85
85 if (likely(qedi) && likely(qedi->pdev)) 86 if (likely(qedi) && likely(qedi->pdev))
86 pr_notice("[%s]:[%s:%d]:%d: %pV", 87 pr_notice("[%s]:[%s:%d]:%d: %pV",
@@ -89,6 +90,7 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
89 else 90 else
90 pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); 91 pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
91 92
93ret:
92 va_end(va); 94 va_end(va);
93} 95}
94 96
@@ -109,7 +111,7 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
109 vaf.va = &va; 111 vaf.va = &va;
110 112
111 if (!(qedi_dbg_log & level)) 113 if (!(qedi_dbg_log & level))
112 return; 114 goto ret;
113 115
114 if (likely(qedi) && likely(qedi->pdev)) 116 if (likely(qedi) && likely(qedi->pdev))
115 pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev), 117 pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
@@ -117,6 +119,7 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
117 else 119 else
118 pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf); 120 pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
119 121
122ret:
120 va_end(va); 123 va_end(va);
121} 124}
122 125
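
The three qedi_dbg hunks above all fix the same leak: an early return after va_start() skipped the matching va_end(). A small user-space reproduction of the corrected control flow:

#include <stdarg.h>
#include <stdio.h>

#define LOG_WARN 0x1
static unsigned int log_mask = LOG_WARN;

/* Every va_start() must be paired with va_end() on all return paths,
 * which is why the driver converts early returns into "goto ret". */
static void demo_warn(unsigned int level, const char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	if (!(log_mask & level))
		goto ret;               /* filtered out, but still clean up */

	vprintf(fmt, va);
ret:
	va_end(va);
}

int main(void)
{
	demo_warn(LOG_WARN, "warn: %d\n", 7);   /* printed */
	demo_warn(0x2, "notice: %d\n", 8);      /* filtered, va_end still runs */
	return 0;
}
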
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index d6a205433b66..b9f79d36142d 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -48,6 +48,7 @@ struct scsi_host_template qedi_host_template = {
48 .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver", 48 .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
49 .proc_name = QEDI_MODULE_NAME, 49 .proc_name = QEDI_MODULE_NAME,
50 .queuecommand = iscsi_queuecommand, 50 .queuecommand = iscsi_queuecommand,
51 .eh_timed_out = iscsi_eh_cmd_timed_out,
51 .eh_abort_handler = iscsi_eh_abort, 52 .eh_abort_handler = iscsi_eh_abort,
52 .eh_device_reset_handler = iscsi_eh_device_reset, 53 .eh_device_reset_handler = iscsi_eh_device_reset,
53 .eh_target_reset_handler = iscsi_eh_recover_target, 54 .eh_target_reset_handler = iscsi_eh_recover_target,
@@ -453,13 +454,9 @@ static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
453 if (rval) { 454 if (rval) {
454 rval = -ENXIO; 455 rval = -ENXIO;
455 QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n"); 456 QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
456 goto update_conn_err;
457 } 457 }
458 458
459 kfree(conn_info); 459 kfree(conn_info);
460 rval = 0;
461
462update_conn_err:
463 return rval; 460 return rval;
464} 461}
465 462
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 5b1287a63c49..2f14adfab018 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2248,7 +2248,7 @@ struct ct_fdmiv2_hba_attr {
2248 uint32_t num_ports; 2248 uint32_t num_ports;
2249 uint8_t fabric_name[WWN_SIZE]; 2249 uint8_t fabric_name[WWN_SIZE];
2250 uint8_t bios_name[32]; 2250 uint8_t bios_name[32];
2251 uint8_t vendor_indentifer[8]; 2251 uint8_t vendor_identifier[8];
2252 } a; 2252 } a;
2253}; 2253};
2254 2254
@@ -2423,7 +2423,7 @@ struct ct_sns_req {
2423 } rsnn_nn; 2423 } rsnn_nn;
2424 2424
2425 struct { 2425 struct {
2426 uint8_t hba_indentifier[8]; 2426 uint8_t hba_identifier[8];
2427 } ghat; 2427 } ghat;
2428 2428
2429 struct { 2429 struct {
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 94e8a8592f69..ee3df8794806 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1939,15 +1939,15 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1939 /* Vendor Identifier */ 1939 /* Vendor Identifier */
1940 eiter = entries + size; 1940 eiter = entries + size;
1941 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER); 1941 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
1942 snprintf(eiter->a.vendor_indentifer, sizeof(eiter->a.vendor_indentifer), 1942 snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
1943 "%s", "QLGC"); 1943 "%s", "QLGC");
1944 alen = strlen(eiter->a.vendor_indentifer); 1944 alen = strlen(eiter->a.vendor_identifier);
1945 alen += 4 - (alen & 3); 1945 alen += 4 - (alen & 3);
1946 eiter->len = cpu_to_be16(4 + alen); 1946 eiter->len = cpu_to_be16(4 + alen);
1947 size += 4 + alen; 1947 size += 4 + alen;
1948 1948
1949 ql_dbg(ql_dbg_disc, vha, 0x20b1, 1949 ql_dbg(ql_dbg_disc, vha, 0x20b1,
1950 "Vendor Identifier = %s.\n", eiter->a.vendor_indentifer); 1950 "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
1951 1951
1952 /* Update MS request size. */ 1952 /* Update MS request size. */
1953 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1953 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9281bf47cbed..edc2264db45b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2997,14 +2997,14 @@ struct qla_init_msix_entry {
2997 irq_handler_t handler; 2997 irq_handler_t handler;
2998}; 2998};
2999 2999
3000static struct qla_init_msix_entry msix_entries[] = { 3000static const struct qla_init_msix_entry msix_entries[] = {
3001 { "qla2xxx (default)", qla24xx_msix_default }, 3001 { "qla2xxx (default)", qla24xx_msix_default },
3002 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 3002 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
3003 { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, 3003 { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
3004 { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q }, 3004 { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
3005}; 3005};
3006 3006
3007static struct qla_init_msix_entry qla82xx_msix_entries[] = { 3007static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3008 { "qla2xxx (default)", qla82xx_msix_default }, 3008 { "qla2xxx (default)", qla82xx_msix_default },
3009 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 3009 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3010}; 3010};
@@ -3078,7 +3078,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3078 qentry->handle = rsp; 3078 qentry->handle = rsp;
3079 rsp->msix = qentry; 3079 rsp->msix = qentry;
3080 scnprintf(qentry->name, sizeof(qentry->name), 3080 scnprintf(qentry->name, sizeof(qentry->name),
3081 msix_entries[i].name); 3081 "%s", msix_entries[i].name);
3082 if (IS_P3P_TYPE(ha)) 3082 if (IS_P3P_TYPE(ha))
3083 ret = request_irq(qentry->vector, 3083 ret = request_irq(qentry->vector,
3084 qla82xx_msix_entries[i].handler, 3084 qla82xx_msix_entries[i].handler,
@@ -3102,7 +3102,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3102 rsp->msix = qentry; 3102 rsp->msix = qentry;
3103 qentry->handle = rsp; 3103 qentry->handle = rsp;
3104 scnprintf(qentry->name, sizeof(qentry->name), 3104 scnprintf(qentry->name, sizeof(qentry->name),
3105 msix_entries[QLA_ATIO_VECTOR].name); 3105 "%s", msix_entries[QLA_ATIO_VECTOR].name);
3106 qentry->in_use = 1; 3106 qentry->in_use = 1;
3107 ret = request_irq(qentry->vector, 3107 ret = request_irq(qentry->vector,
3108 msix_entries[QLA_ATIO_VECTOR].handler, 3108 msix_entries[QLA_ATIO_VECTOR].handler,
@@ -3271,7 +3271,7 @@ free_irqs:
3271int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, 3271int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3272 struct qla_msix_entry *msix, int vector_type) 3272 struct qla_msix_entry *msix, int vector_type)
3273{ 3273{
3274 struct qla_init_msix_entry *intr = &msix_entries[vector_type]; 3274 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
3275 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3275 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3276 int ret; 3276 int ret;
3277 3277
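
The qla_isr.c hunks also stop feeding the qla_init_msix_entry names to scnprintf() as the format argument, since any '%' in a name would be interpreted as a conversion. A user-space demonstration of why the fixed "%s" format matters (snprintf stands in for the kernel's scnprintf; compilers flag the first call under -Wformat-security, which is the point):

#include <stdio.h>

int main(void)
{
	char buf[32];
	const char *name = "qla (50%% duty)";  /* a string containing '%' */

	/* As the format argument, "%%" collapses to a single '%': */
	snprintf(buf, sizeof(buf), name);
	printf("as format  : %s\n", buf);       /* qla (50% duty)  */

	/* As an argument to a fixed "%s" format, it is copied verbatim: */
	snprintf(buf, sizeof(buf), "%s", name);
	printf("as argument: %s\n", buf);       /* qla (50%% duty) */
	return 0;
}
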
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 40660461a4b5..d01c90c7dd04 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -262,6 +262,7 @@ struct scsi_host_template qla2xxx_driver_template = {
262 .name = QLA2XXX_DRIVER_NAME, 262 .name = QLA2XXX_DRIVER_NAME,
263 .queuecommand = qla2xxx_queuecommand, 263 .queuecommand = qla2xxx_queuecommand,
264 264
265 .eh_timed_out = fc_eh_timed_out,
265 .eh_abort_handler = qla2xxx_eh_abort, 266 .eh_abort_handler = qla2xxx_eh_abort,
266 .eh_device_reset_handler = qla2xxx_eh_device_reset, 267 .eh_device_reset_handler = qla2xxx_eh_device_reset,
267 .eh_target_reset_handler = qla2xxx_eh_target_reset, 268 .eh_target_reset_handler = qla2xxx_eh_target_reset,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index aeebefb1e9f8..fc233717355f 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -408,9 +408,6 @@ struct qla4_8xxx_legacy_intr_set {
408}; 408};
409 409
410/* MSI-X Support */ 410/* MSI-X Support */
411
412#define QLA_MSIX_DEFAULT 0
413#define QLA_MSIX_RSP_Q 1
414#define QLA_MSIX_ENTRIES 2 411#define QLA_MSIX_ENTRIES 2
415 412
416/* 413/*
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 9fbb33fc90c7..ac52150d1569 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -9539,15 +9539,15 @@ exit_host_reset:
9539 * driver calls the following device driver's callbacks 9539 * driver calls the following device driver's callbacks
9540 * 9540 *
9541 * - Fatal Errors - link_reset 9541 * - Fatal Errors - link_reset
9542 * - Non-Fatal Errors - driver's pci_error_detected() which 9542 * - Non-Fatal Errors - driver's error_detected() which
9543 * returns CAN_RECOVER, NEED_RESET or DISCONNECT. 9543 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
9544 * 9544 *
9545 * PCI AER driver calls 9545 * PCI AER driver calls
9546 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled 9546 * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled()
9547 * returns RECOVERED or NEED_RESET if fw_hung 9547 * returns RECOVERED or NEED_RESET if fw_hung
9548 * NEED_RESET - driver's slot_reset() 9548 * NEED_RESET - driver's slot_reset()
9549 * DISCONNECT - device is dead & cannot recover 9549 * DISCONNECT - device is dead & cannot recover
9550 * RECOVERED - driver's pci_resume() 9550 * RECOVERED - driver's resume()
9551 */ 9551 */
9552static pci_ers_result_t 9552static pci_ers_result_t
9553qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 9553qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 03051e12a072..17249c3650fe 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -125,6 +125,7 @@ static const char *sdebug_version_date = "20160430";
125#define DEF_OPTS 0 125#define DEF_OPTS 0
126#define DEF_OPT_BLKS 1024 126#define DEF_OPT_BLKS 1024
127#define DEF_PHYSBLK_EXP 0 127#define DEF_PHYSBLK_EXP 0
128#define DEF_OPT_XFERLEN_EXP 0
128#define DEF_PTYPE TYPE_DISK 129#define DEF_PTYPE TYPE_DISK
129#define DEF_REMOVABLE false 130#define DEF_REMOVABLE false
130#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */ 131#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
@@ -590,6 +591,7 @@ static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
590static int sdebug_opt_blks = DEF_OPT_BLKS; 591static int sdebug_opt_blks = DEF_OPT_BLKS;
591static int sdebug_opts = DEF_OPTS; 592static int sdebug_opts = DEF_OPTS;
592static int sdebug_physblk_exp = DEF_PHYSBLK_EXP; 593static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
594static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
593static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */ 595static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
594static int sdebug_scsi_level = DEF_SCSI_LEVEL; 596static int sdebug_scsi_level = DEF_SCSI_LEVEL;
595static int sdebug_sector_size = DEF_SECTOR_SIZE; 597static int sdebug_sector_size = DEF_SECTOR_SIZE;
@@ -1205,7 +1207,11 @@ static int inquiry_vpd_b0(unsigned char *arr)
1205 memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); 1207 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1206 1208
1207 /* Optimal transfer length granularity */ 1209 /* Optimal transfer length granularity */
1208 gran = 1 << sdebug_physblk_exp; 1210 if (sdebug_opt_xferlen_exp != 0 &&
1211 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1212 gran = 1 << sdebug_opt_xferlen_exp;
1213 else
1214 gran = 1 << sdebug_physblk_exp;
1209 put_unaligned_be16(gran, arr + 2); 1215 put_unaligned_be16(gran, arr + 2);
1210 1216
1211 /* Maximum Transfer Length */ 1217 /* Maximum Transfer Length */
@@ -4161,6 +4167,7 @@ module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4161module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO); 4167module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4162module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR); 4168module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4163module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO); 4169module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4170module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
4164module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR); 4171module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4165module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR); 4172module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4166module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO); 4173module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
@@ -4212,6 +4219,7 @@ MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4212MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)"); 4219MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4213MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)"); 4220MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4214MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); 4221MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4222MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
4215MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 4223MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4216MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); 4224MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4217MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])"); 4225MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
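
The new opt_xferlen_exp knob feeds the VPD page B0h optimal-transfer-length granularity shown in the inquiry_vpd_b0() hunk; it only takes effect when set and larger than physblk_exp. The same rule in isolation, with illustrative values:

#include <stdio.h>

static int physblk_exp = 3;        /* 2^3 = 8 logical blocks per physical */
static int opt_xferlen_exp;        /* 0 means "default to physblk_exp" */

/* Mirror of the granularity rule above: the optional exponent only wins
 * when nonzero and larger than the physical block exponent. */
static unsigned int opt_xferlen_gran(void)
{
	if (opt_xferlen_exp != 0 && physblk_exp < opt_xferlen_exp)
		return 1u << opt_xferlen_exp;
	return 1u << physblk_exp;
}

int main(void)
{
	opt_xferlen_exp = 0;
	printf("gran = %u\n", opt_xferlen_gran());  /* 8, from physblk_exp */
	opt_xferlen_exp = 6;
	printf("gran = %u\n", opt_xferlen_gran());  /* 64, exponent override */
	return 0;
}
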
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 9e82fa5715bc..f2cafae150bc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -279,9 +279,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
279 if (host->eh_deadline != -1 && !host->last_reset) 279 if (host->eh_deadline != -1 && !host->last_reset)
280 host->last_reset = jiffies; 280 host->last_reset = jiffies;
281 281
282 if (host->transportt->eh_timed_out) 282 if (host->hostt->eh_timed_out)
283 rtn = host->transportt->eh_timed_out(scmd);
284 else if (host->hostt->eh_timed_out)
285 rtn = host->hostt->eh_timed_out(scmd); 283 rtn = host->hostt->eh_timed_out(scmd);
286 284
287 if (rtn == BLK_EH_NOT_HANDLED) { 285 if (rtn == BLK_EH_NOT_HANDLED) {
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 13dcb9ba823c..2d753c93e07a 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2055,7 +2055,7 @@ static int fc_vport_match(struct attribute_container *cont,
2055 2055
2056 2056
2057/** 2057/**
2058 * fc_timed_out - FC Transport I/O timeout intercept handler 2058 * fc_eh_timed_out - FC Transport I/O timeout intercept handler
2059 * @scmd: The SCSI command which timed out 2059 * @scmd: The SCSI command which timed out
2060 * 2060 *
2061 * This routine protects against error handlers getting invoked while a 2061 * This routine protects against error handlers getting invoked while a
@@ -2076,8 +2076,8 @@ static int fc_vport_match(struct attribute_container *cont,
2076 * Notes: 2076 * Notes:
2077 * This routine assumes no locks are held on entry. 2077 * This routine assumes no locks are held on entry.
2078 */ 2078 */
2079static enum blk_eh_timer_return 2079enum blk_eh_timer_return
2080fc_timed_out(struct scsi_cmnd *scmd) 2080fc_eh_timed_out(struct scsi_cmnd *scmd)
2081{ 2081{
2082 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); 2082 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
2083 2083
@@ -2086,6 +2086,7 @@ fc_timed_out(struct scsi_cmnd *scmd)
2086 2086
2087 return BLK_EH_NOT_HANDLED; 2087 return BLK_EH_NOT_HANDLED;
2088} 2088}
2089EXPORT_SYMBOL(fc_eh_timed_out);
2089 2090
2090/* 2091/*
2091 * Called by fc_user_scan to locate an rport on the shost that 2092 * Called by fc_user_scan to locate an rport on the shost that
@@ -2159,19 +2160,6 @@ fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
2159 return 0; 2160 return 0;
2160} 2161}
2161 2162
2162static int fc_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
2163 int result)
2164{
2165 struct fc_internal *i = to_fc_internal(shost->transportt);
2166 return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
2167}
2168
2169static int fc_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
2170{
2171 struct fc_internal *i = to_fc_internal(shost->transportt);
2172 return i->f->it_nexus_response(shost, nexus, result);
2173}
2174
2175struct scsi_transport_template * 2163struct scsi_transport_template *
2176fc_attach_transport(struct fc_function_template *ft) 2164fc_attach_transport(struct fc_function_template *ft)
2177{ 2165{
@@ -2211,14 +2199,8 @@ fc_attach_transport(struct fc_function_template *ft)
2211 /* Transport uses the shost workq for scsi scanning */ 2199 /* Transport uses the shost workq for scsi scanning */
2212 i->t.create_work_queue = 1; 2200 i->t.create_work_queue = 1;
2213 2201
2214 i->t.eh_timed_out = fc_timed_out;
2215
2216 i->t.user_scan = fc_user_scan; 2202 i->t.user_scan = fc_user_scan;
2217 2203
2218 /* target-mode drivers' functions */
2219 i->t.tsk_mgmt_response = fc_tsk_mgmt_response;
2220 i->t.it_nexus_response = fc_it_nexus_response;
2221
2222 /* 2204 /*
2223 * Setup SCSI Target Attributes. 2205 * Setup SCSI Target Attributes.
2224 */ 2206 */
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index b87a78673f65..3c5d89852e9f 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(srp_reconnect_rport);
591 * Note: This function is called from soft-IRQ context and with the request 591 * Note: This function is called from soft-IRQ context and with the request
592 * queue lock held. 592 * queue lock held.
593 */ 593 */
594static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) 594enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
595{ 595{
596 struct scsi_device *sdev = scmd->device; 596 struct scsi_device *sdev = scmd->device;
597 struct Scsi_Host *shost = sdev->host; 597 struct Scsi_Host *shost = sdev->host;
@@ -603,6 +603,7 @@ static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
603 i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? 603 i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
604 BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 604 BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
605} 605}
606EXPORT_SYMBOL(srp_timed_out);
606 607
607static void srp_rport_release(struct device *dev) 608static void srp_rport_release(struct device *dev)
608{ 609{
@@ -793,19 +794,6 @@ void srp_stop_rport_timers(struct srp_rport *rport)
793} 794}
794EXPORT_SYMBOL_GPL(srp_stop_rport_timers); 795EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
795 796
796static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
797 int result)
798{
799 struct srp_internal *i = to_srp_internal(shost->transportt);
800 return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
801}
802
803static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
804{
805 struct srp_internal *i = to_srp_internal(shost->transportt);
806 return i->f->it_nexus_response(shost, nexus, result);
807}
808
809/** 797/**
810 * srp_attach_transport - instantiate SRP transport template 798 * srp_attach_transport - instantiate SRP transport template
811 * @ft: SRP transport class function template 799 * @ft: SRP transport class function template
@@ -820,11 +808,6 @@ srp_attach_transport(struct srp_function_template *ft)
820 if (!i) 808 if (!i)
821 return NULL; 809 return NULL;
822 810
823 i->t.eh_timed_out = srp_timed_out;
824
825 i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
826 i->t.it_nexus_response = srp_it_nexus_response;
827
828 i->t.host_size = sizeof(struct srp_host_attrs); 811 i->t.host_size = sizeof(struct srp_host_attrs);
829 i->t.host_attrs.ac.attrs = &i->host_attrs[0]; 812 i->t.host_attrs.ac.attrs = &i->host_attrs[0];
830 i->t.host_attrs.ac.class = &srp_host_class.class; 813 i->t.host_attrs.ac.class = &srp_host_class.class;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 40b4038c019e..cb6e68dd6df0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -703,7 +703,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
703 703
704/** 704/**
705 * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device 705 * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
706 * @sdp: scsi device to operate one 706 * @sdp: scsi device to operate on
707 * @rq: Request to prepare 707 * @rq: Request to prepare
708 * 708 *
709 * Will issue either UNMAP or WRITE SAME(16) depending on preference 709 * Will issue either UNMAP or WRITE SAME(16) depending on preference
@@ -3226,7 +3226,7 @@ static int sd_probe(struct device *dev)
3226 * sd_remove - called whenever a scsi disk (previously recognized by 3226 * sd_remove - called whenever a scsi disk (previously recognized by
3227 * sd_probe) is detached from the system. It is called (potentially 3227 * sd_probe) is detached from the system. It is called (potentially
3228 * multiple times) during sd module unload. 3228 * multiple times) during sd module unload.
3229 * @sdp: pointer to mid level scsi device object 3229 * @dev: pointer to device object
3230 * 3230 *
3231 * Note: this function is invoked from the scsi mid-level. 3231 * Note: this function is invoked from the scsi mid-level.
3232 * This function potentially frees up a device name (e.g. /dev/sdc) 3232 * This function potentially frees up a device name (e.g. /dev/sdc)
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index 8ed778d4dbb9..de0ab5fc8474 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -299,7 +299,6 @@ struct snic {
299 299
300 /* pci related */ 300 /* pci related */
301 struct pci_dev *pdev; 301 struct pci_dev *pdev;
302 struct msix_entry msix_entry[SNIC_MSIX_INTR_MAX];
303 struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX]; 302 struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX];
304 303
305 /* io related info */ 304 /* io related info */
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
index f552003128c6..d859501e4ccd 100644
--- a/drivers/scsi/snic/snic_isr.c
+++ b/drivers/scsi/snic/snic_isr.c
@@ -93,7 +93,7 @@ snic_free_intr(struct snic *snic)
93 /* ONLY interrupt mode MSIX is supported */ 93 /* ONLY interrupt mode MSIX is supported */
94 for (i = 0; i < ARRAY_SIZE(snic->msix); i++) { 94 for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
95 if (snic->msix[i].requested) { 95 if (snic->msix[i].requested) {
96 free_irq(snic->msix_entry[i].vector, 96 free_irq(pci_irq_vector(snic->pdev, i),
97 snic->msix[i].devid); 97 snic->msix[i].devid);
98 } 98 }
99 } 99 }
@@ -134,7 +134,7 @@ snic_request_intr(struct snic *snic)
134 snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic; 134 snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;
135 135
136 for (i = 0; i < ARRAY_SIZE(snic->msix); i++) { 136 for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
137 ret = request_irq(snic->msix_entry[i].vector, 137 ret = request_irq(pci_irq_vector(snic->pdev, i),
138 snic->msix[i].isr, 138 snic->msix[i].isr,
139 0, 139 0,
140 snic->msix[i].devname, 140 snic->msix[i].devname,
@@ -158,47 +158,37 @@ snic_set_intr_mode(struct snic *snic)
158{ 158{
159 unsigned int n = ARRAY_SIZE(snic->wq); 159 unsigned int n = ARRAY_SIZE(snic->wq);
160 unsigned int m = SNIC_CQ_IO_CMPL_MAX; 160 unsigned int m = SNIC_CQ_IO_CMPL_MAX;
161 unsigned int i; 161 unsigned int vecs = n + m + 1;
162 162
163 /* 163 /*
164 * We need n WQs, m CQs, and n+m+1 INTRs 164 * We need n WQs, m CQs, and n+m+1 INTRs
 165 * (last INTR is used for WQ/CQ errors and notification area) 165 * (last INTR is used for WQ/CQ errors and notification area)
166 */ 166 */
167
168 BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) > 167 BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
169 ARRAY_SIZE(snic->intr)); 168 ARRAY_SIZE(snic->intr));
170 SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));
171
172 for (i = 0; i < (n + m + 1); i++)
173 snic->msix_entry[i].entry = i;
174
175 if (snic->wq_count >= n && snic->cq_count >= (n + m)) {
176 if (!pci_enable_msix(snic->pdev,
177 snic->msix_entry,
178 (n + m + 1))) {
179 snic->wq_count = n;
180 snic->cq_count = n + m;
181 snic->intr_count = n + m + 1;
182 snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;
183
184 SNIC_ISR_DBG(snic->shost,
185 "Using MSI-X Interrupts\n");
186 svnic_dev_set_intr_mode(snic->vdev,
187 VNIC_DEV_INTR_MODE_MSIX);
188
189 return 0;
190 }
191 }
192 169
193 svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); 170 if (snic->wq_count < n || snic->cq_count < n + m)
171 goto fail;
194 172
173 if (pci_alloc_irq_vectors(snic->pdev, vecs, vecs, PCI_IRQ_MSIX) < 0)
174 goto fail;
175
176 snic->wq_count = n;
177 snic->cq_count = n + m;
178 snic->intr_count = vecs;
179 snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;
180
181 SNIC_ISR_DBG(snic->shost, "Using MSI-X Interrupts\n");
182 svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_MSIX);
183 return 0;
184fail:
185 svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
195 return -EINVAL; 186 return -EINVAL;
196} /* end of snic_set_intr_mode */ 187} /* end of snic_set_intr_mode */
197 188
198void 189void
199snic_clear_intr_mode(struct snic *snic) 190snic_clear_intr_mode(struct snic *snic)
200{ 191{
201 pci_disable_msix(snic->pdev); 192 pci_free_irq_vectors(snic->pdev);
202
203 svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX); 193 svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
204} 194}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 05526b71541b..585e54f6512c 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -136,6 +136,8 @@ struct hv_fc_wwn_packet {
136#define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000 136#define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000
137#define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000 137#define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000
138 138
139#define SP_UNTAGGED ((unsigned char) ~0)
140#define SRB_SIMPLE_TAG_REQUEST 0x20
139 141
140/* 142/*
141 * Platform neutral description of a scsi request - 143 * Platform neutral description of a scsi request -
@@ -375,6 +377,7 @@ enum storvsc_request_type {
375#define SRB_STATUS_SUCCESS 0x01 377#define SRB_STATUS_SUCCESS 0x01
376#define SRB_STATUS_ABORTED 0x02 378#define SRB_STATUS_ABORTED 0x02
377#define SRB_STATUS_ERROR 0x04 379#define SRB_STATUS_ERROR 0x04
380#define SRB_STATUS_DATA_OVERRUN 0x12
378 381
379#define SRB_STATUS(status) \ 382#define SRB_STATUS(status) \
380 (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN)) 383 (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
@@ -458,6 +461,15 @@ struct storvsc_device {
458 * Max I/O, the device can support. 461 * Max I/O, the device can support.
459 */ 462 */
460 u32 max_transfer_bytes; 463 u32 max_transfer_bytes;
464 /*
465 * Number of sub-channels we will open.
466 */
467 u16 num_sc;
468 struct vmbus_channel **stor_chns;
469 /*
470 * Mask of CPUs bound to subchannels.
471 */
472 struct cpumask alloced_cpus;
461 /* Used for vsc/vsp channel reset process */ 473 /* Used for vsc/vsp channel reset process */
462 struct storvsc_cmd_request init_request; 474 struct storvsc_cmd_request init_request;
463 struct storvsc_cmd_request reset_request; 475 struct storvsc_cmd_request reset_request;
@@ -635,6 +647,11 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
635 (void *)&props, 647 (void *)&props,
636 sizeof(struct vmstorage_channel_properties), 648 sizeof(struct vmstorage_channel_properties),
637 storvsc_on_channel_callback, new_sc); 649 storvsc_on_channel_callback, new_sc);
650
651 if (new_sc->state == CHANNEL_OPENED_STATE) {
652 stor_device->stor_chns[new_sc->target_cpu] = new_sc;
653 cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
654 }
638} 655}
639 656
640static void handle_multichannel_storage(struct hv_device *device, int max_chns) 657static void handle_multichannel_storage(struct hv_device *device, int max_chns)
@@ -651,6 +668,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
651 if (!stor_device) 668 if (!stor_device)
652 return; 669 return;
653 670
671 stor_device->num_sc = num_sc;
654 request = &stor_device->init_request; 672 request = &stor_device->init_request;
655 vstor_packet = &request->vstor_packet; 673 vstor_packet = &request->vstor_packet;
656 674
@@ -838,6 +856,25 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
838 * support multi-channel. 856 * support multi-channel.
839 */ 857 */
840 max_chns = vstor_packet->storage_channel_properties.max_channel_cnt; 858 max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
859
860 /*
861 * Allocate state to manage the sub-channels.
862 * We allocate an array based on the numbers of possible CPUs
863 * (Hyper-V does not support cpu online/offline).
 864 * This array will be sparsely populated with unique
865 * channels - primary + sub-channels.
866 * We will however populate all the slots to evenly distribute
867 * the load.
868 */
869 stor_device->stor_chns = kzalloc(sizeof(void *) * num_possible_cpus(),
870 GFP_KERNEL);
871 if (stor_device->stor_chns == NULL)
872 return -ENOMEM;
873
874 stor_device->stor_chns[device->channel->target_cpu] = device->channel;
875 cpumask_set_cpu(device->channel->target_cpu,
876 &stor_device->alloced_cpus);
877
841 if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) { 878 if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
842 if (vstor_packet->storage_channel_properties.flags & 879 if (vstor_packet->storage_channel_properties.flags &
843 STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL) 880 STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
@@ -889,6 +926,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
889 switch (SRB_STATUS(vm_srb->srb_status)) { 926 switch (SRB_STATUS(vm_srb->srb_status)) {
890 case SRB_STATUS_ERROR: 927 case SRB_STATUS_ERROR:
891 /* 928 /*
929 * Let upper layer deal with error when
930 * sense message is present.
931 */
932
933 if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
934 break;
935 /*
892 * If there is an error; offline the device since all 936 * If there is an error; offline the device since all
893 * error recovery strategies would have already been 937 * error recovery strategies would have already been
894 * deployed on the host side. However, if the command 938 * deployed on the host side. However, if the command
@@ -953,6 +997,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
953 struct scsi_cmnd *scmnd = cmd_request->cmd; 997 struct scsi_cmnd *scmnd = cmd_request->cmd;
954 struct scsi_sense_hdr sense_hdr; 998 struct scsi_sense_hdr sense_hdr;
955 struct vmscsi_request *vm_srb; 999 struct vmscsi_request *vm_srb;
1000 u32 data_transfer_length;
956 struct Scsi_Host *host; 1001 struct Scsi_Host *host;
957 u32 payload_sz = cmd_request->payload_sz; 1002 u32 payload_sz = cmd_request->payload_sz;
958 void *payload = cmd_request->payload; 1003 void *payload = cmd_request->payload;
@@ -960,6 +1005,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
960 host = stor_dev->host; 1005 host = stor_dev->host;
961 1006
962 vm_srb = &cmd_request->vstor_packet.vm_srb; 1007 vm_srb = &cmd_request->vstor_packet.vm_srb;
1008 data_transfer_length = vm_srb->data_transfer_length;
963 1009
964 scmnd->result = vm_srb->scsi_status; 1010 scmnd->result = vm_srb->scsi_status;
965 1011
@@ -973,13 +1019,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
973 &sense_hdr); 1019 &sense_hdr);
974 } 1020 }
975 1021
976 if (vm_srb->srb_status != SRB_STATUS_SUCCESS) 1022 if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
977 storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc, 1023 storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
978 sense_hdr.ascq); 1024 sense_hdr.ascq);
1025 /*
 1026 * The Windows driver sets data_transfer_length on
1027 * SRB_STATUS_DATA_OVERRUN. On other errors, this value
1028 * is untouched. In these cases we set it to 0.
1029 */
1030 if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
1031 data_transfer_length = 0;
1032 }
979 1033
980 scsi_set_resid(scmnd, 1034 scsi_set_resid(scmnd,
981 cmd_request->payload->range.len - 1035 cmd_request->payload->range.len - data_transfer_length);
982 vm_srb->data_transfer_length);
983 1036
984 scmnd->scsi_done(scmnd); 1037 scmnd->scsi_done(scmnd);
985 1038
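
The completion hunk above only trusts the host-reported data_transfer_length on success or SRB_STATUS_DATA_OVERRUN, zeroing it for other errors before computing the residual. The same rule in stand-alone form, with illustrative values:

#include <stdio.h>

#define SRB_STATUS_SUCCESS      0x01
#define SRB_STATUS_ERROR        0x04
#define SRB_STATUS_DATA_OVERRUN 0x12

/* The reported transfer length is only meaningful on success or
 * DATA_OVERRUN; on other errors the whole request counts as
 * untransferred. */
static unsigned int residual(unsigned int requested, unsigned int reported,
			     unsigned char srb_status)
{
	if (srb_status != SRB_STATUS_SUCCESS &&
	    srb_status != SRB_STATUS_DATA_OVERRUN)
		reported = 0;
	return requested - reported;
}

int main(void)
{
	printf("%u\n", residual(4096, 4096, SRB_STATUS_SUCCESS));     /* 0    */
	printf("%u\n", residual(4096, 512, SRB_STATUS_DATA_OVERRUN)); /* 3584 */
	printf("%u\n", residual(4096, 512, SRB_STATUS_ERROR));        /* 4096 */
	return 0;
}
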
@@ -1198,17 +1251,64 @@ static int storvsc_dev_remove(struct hv_device *device)
1198 /* Close the channel */ 1251 /* Close the channel */
1199 vmbus_close(device->channel); 1252 vmbus_close(device->channel);
1200 1253
1254 kfree(stor_device->stor_chns);
1201 kfree(stor_device); 1255 kfree(stor_device);
1202 return 0; 1256 return 0;
1203} 1257}
1204 1258
1259static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
1260 u16 q_num)
1261{
1262 u16 slot = 0;
1263 u16 hash_qnum;
1264 struct cpumask alloced_mask;
1265 int num_channels, tgt_cpu;
1266
1267 if (stor_device->num_sc == 0)
1268 return stor_device->device->channel;
1269
1270 /*
 1271 * Our channel array is sparsely populated and we
1272 * initiated I/O on a processor/hw-q that does not
1273 * currently have a designated channel. Fix this.
1274 * The strategy is simple:
1275 * I. Ensure NUMA locality
1276 * II. Distribute evenly (best effort)
1277 * III. Mapping is persistent.
1278 */
1279
1280 cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
1281 cpumask_of_node(cpu_to_node(q_num)));
1282
1283 num_channels = cpumask_weight(&alloced_mask);
1284 if (num_channels == 0)
1285 return stor_device->device->channel;
1286
1287 hash_qnum = q_num;
1288 while (hash_qnum >= num_channels)
1289 hash_qnum -= num_channels;
1290
1291 for_each_cpu(tgt_cpu, &alloced_mask) {
1292 if (slot == hash_qnum)
1293 break;
1294 slot++;
1295 }
1296
1297 stor_device->stor_chns[q_num] = stor_device->stor_chns[tgt_cpu];
1298
1299 return stor_device->stor_chns[q_num];
1300}
1301
1302
1205static int storvsc_do_io(struct hv_device *device, 1303static int storvsc_do_io(struct hv_device *device,
1206 struct storvsc_cmd_request *request) 1304 struct storvsc_cmd_request *request, u16 q_num)
1207{ 1305{
1208 struct storvsc_device *stor_device; 1306 struct storvsc_device *stor_device;
1209 struct vstor_packet *vstor_packet; 1307 struct vstor_packet *vstor_packet;
1210 struct vmbus_channel *outgoing_channel; 1308 struct vmbus_channel *outgoing_channel;
1211 int ret = 0; 1309 int ret = 0;
1310 struct cpumask alloced_mask;
1311 int tgt_cpu;
1212 1312
1213 vstor_packet = &request->vstor_packet; 1313 vstor_packet = &request->vstor_packet;
1214 stor_device = get_out_stor_device(device); 1314 stor_device = get_out_stor_device(device);
@@ -1222,7 +1322,26 @@ static int storvsc_do_io(struct hv_device *device,
 1222 * Select an appropriate channel to send the request out. 1322 * Select an appropriate channel to send the request out.
1223 */ 1323 */
1224 1324
1225 outgoing_channel = vmbus_get_outgoing_channel(device->channel); 1325 if (stor_device->stor_chns[q_num] != NULL) {
1326 outgoing_channel = stor_device->stor_chns[q_num];
1327 if (outgoing_channel->target_cpu == smp_processor_id()) {
1328 /*
1329 * Ideally, we want to pick a different channel if
1330 * available on the same NUMA node.
1331 */
1332 cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
1333 cpumask_of_node(cpu_to_node(q_num)));
1334 for_each_cpu(tgt_cpu, &alloced_mask) {
1335 if (tgt_cpu != outgoing_channel->target_cpu) {
1336 outgoing_channel =
1337 stor_device->stor_chns[tgt_cpu];
1338 break;
1339 }
1340 }
1341 }
1342 } else {
1343 outgoing_channel = get_og_chn(stor_device, q_num);
1344 }
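When the cached channel's interrupt targets the CPU that is submitting the I/O, the block above tries any other NUMA-local channel first, spreading completion work away from the submitting CPU. A small sketch of that preference (plain arrays stand in for cpumasks; the names are illustrative):

#include <stdio.h>

/*
 * Prefer a channel whose completion interrupt does not target the
 * submitting CPU; fall back to the cached channel otherwise.
 */
static int pick_channel_cpu(const int *node_cpus, int n, int cached_cpu,
			    int current_cpu)
{
	if (cached_cpu != current_cpu)
		return cached_cpu;		/* no conflict, keep the cache */

	for (int i = 0; i < n; i++)
		if (node_cpus[i] != cached_cpu)
			return node_cpus[i];	/* first alternative */

	return cached_cpu;			/* node has only one channel */
}

int main(void)
{
	const int node_cpus[] = { 0, 2, 5 };

	/* submitting from cpu 2 while the cached channel targets cpu 2 */
	printf("-> cpu %d\n", pick_channel_cpu(node_cpus, 3, 2, 2));
	return 0;
}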
1226 1345
1227 1346
1228 vstor_packet->flags |= REQUEST_COMPLETION_FLAG; 1347 vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
@@ -1267,8 +1386,6 @@ static int storvsc_do_io(struct hv_device *device,
1267static int storvsc_device_configure(struct scsi_device *sdevice) 1386static int storvsc_device_configure(struct scsi_device *sdevice)
1268{ 1387{
1269 1388
1270 blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
1271
1272 blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY); 1389 blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
1273 1390
1274 blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ)); 1391 blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
@@ -1451,6 +1568,13 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
1451 vm_srb->win8_extension.srb_flags |= 1568 vm_srb->win8_extension.srb_flags |=
1452 SRB_FLAGS_DISABLE_SYNCH_TRANSFER; 1569 SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
1453 1570
1571 if (scmnd->device->tagged_supported) {
1572 vm_srb->win8_extension.srb_flags |=
1573 (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
1574 vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
1575 vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
1576 }
1577
1454 /* Build the SRB */ 1578 /* Build the SRB */
1455 switch (scmnd->sc_data_direction) { 1579 switch (scmnd->sc_data_direction) {
1456 case DMA_TO_DEVICE: 1580 case DMA_TO_DEVICE:
@@ -1511,20 +1635,14 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
1511 page_to_pfn(sg_page((cur_sgl))); 1635 page_to_pfn(sg_page((cur_sgl)));
1512 cur_sgl = sg_next(cur_sgl); 1636 cur_sgl = sg_next(cur_sgl);
1513 } 1637 }
1514
1515 } else if (scsi_sglist(scmnd)) {
1516 payload->range.len = length;
1517 payload->range.offset =
1518 virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
1519 payload->range.pfn_array[0] =
1520 virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
1521 } 1638 }
1522 1639
1523 cmd_request->payload = payload; 1640 cmd_request->payload = payload;
1524 cmd_request->payload_sz = payload_sz; 1641 cmd_request->payload_sz = payload_sz;
1525 1642
1526 /* Invokes the vsc to start an IO */ 1643 /* Invokes the vsc to start an IO */
1527 ret = storvsc_do_io(dev, cmd_request); 1644 ret = storvsc_do_io(dev, cmd_request, get_cpu());
1645 put_cpu();
1528 1646
1529 if (ret == -EAGAIN) { 1647 if (ret == -EAGAIN) {
1530 /* no more space */ 1648 /* no more space */
@@ -1550,6 +1668,7 @@ static struct scsi_host_template scsi_driver = {
1550 /* Make sure we don't get an sg segment that crosses a page boundary */ 1668
1551 .dma_boundary = PAGE_SIZE-1, 1669 .dma_boundary = PAGE_SIZE-1,
1552 .no_write_same = 1, 1670 .no_write_same = 1,
1671 .track_queue_depth = 1,
1553}; 1672};
1554 1673
1555enum { 1674enum {
@@ -1680,6 +1799,11 @@ static int storvsc_probe(struct hv_device *device,
1680 * from the host. 1799 * from the host.
1681 */ 1800 */
1682 host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); 1801 host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
1802 /*
1803 * Set the number of HW queues we are supporting.
1804 */
1805 if (stor_device->num_sc != 0)
1806 host->nr_hw_queues = stor_device->num_sc + 1;
1683 1807
1684 /* Register the HBA and start the scsi bus scan */ 1808 /* Register the HBA and start the scsi bus scan */
1685 ret = scsi_add_host(host, &device->device); 1809 ret = scsi_add_host(host, &device->device);
@@ -1716,6 +1840,7 @@ err_out2:
1716 goto err_out0; 1840 goto err_out0;
1717 1841
1718err_out1: 1842err_out1:
1843 kfree(stor_device->stor_chns);
1719 kfree(stor_device); 1844 kfree(stor_device);
1720 1845
1721err_out0: 1846err_out0:
@@ -1774,11 +1899,6 @@ static int __init storvsc_drv_init(void)
1774 fc_transport_template = fc_attach_transport(&fc_transport_functions); 1899 fc_transport_template = fc_attach_transport(&fc_transport_functions);
1775 if (!fc_transport_template) 1900 if (!fc_transport_template)
1776 return -ENODEV; 1901 return -ENODEV;
1777
1778 /*
1779 * Install Hyper-V specific timeout handler.
1780 */
1781 fc_transport_template->eh_timed_out = storvsc_eh_timed_out;
1782#endif 1902#endif
1783 1903
1784 ret = vmbus_driver_register(&storvsc_drv); 1904 ret = vmbus_driver_register(&storvsc_drv);
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index bcf7d05d1aab..e64b0c542f95 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -34,7 +34,6 @@
34#include <asm/dvma.h> 34#include <asm/dvma.h>
35 35
36#include <scsi/scsi_host.h> 36#include <scsi/scsi_host.h>
37#include "sun3_scsi.h"
38 37
39/* minimum number of bytes to do dma on */ 38/* minimum number of bytes to do dma on */
40#define DMA_MIN_SIZE 129 39#define DMA_MIN_SIZE 129
@@ -56,11 +55,87 @@
56#define NCR5380_dma_send_setup sun3scsi_dma_count 55#define NCR5380_dma_send_setup sun3scsi_dma_count
57#define NCR5380_dma_residual sun3scsi_dma_residual 56#define NCR5380_dma_residual sun3scsi_dma_residual
58 57
59#define NCR5380_acquire_dma_irq(instance) (1)
60#define NCR5380_release_dma_irq(instance)
61
62#include "NCR5380.h" 58#include "NCR5380.h"
63 59
60/* dma regs start at regbase + 8, directly after the NCR regs */
61struct sun3_dma_regs {
62 unsigned short dma_addr_hi; /* vme only */
63 unsigned short dma_addr_lo; /* vme only */
64 unsigned short dma_count_hi; /* vme only */
65 unsigned short dma_count_lo; /* vme only */
66 unsigned short udc_data; /* udc dma data reg (obio only) */
67 unsigned short udc_addr; /* udc dma addr reg (obio only) */
68 unsigned short fifo_data; /* fifo data reg,
69 * holds extra byte on odd dma reads
70 */
71 unsigned short fifo_count;
72 unsigned short csr; /* control/status reg */
73 unsigned short bpack_hi; /* vme only */
74 unsigned short bpack_lo; /* vme only */
75 unsigned short ivect; /* vme only */
76 unsigned short fifo_count_hi; /* vme only */
77};
78
79/* udc chip specific regs - live in dvma space */
80struct sun3_udc_regs {
81 unsigned short rsel; /* select regs to load */
82 unsigned short addr_hi; /* high word of addr */
83 unsigned short addr_lo; /* low word */
84 unsigned short count; /* words to be xfer'd */
85 unsigned short mode_hi; /* high word of channel mode */
86 unsigned short mode_lo; /* low word of channel mode */
87};
88
89/* addresses of the udc registers */
90#define UDC_MODE 0x38
91#define UDC_CSR 0x2e /* command/status */
92#define UDC_CHN_HI 0x26 /* chain high word */
93#define UDC_CHN_LO 0x22 /* chain lo word */
94#define UDC_CURA_HI 0x1a /* cur reg A high */
95#define UDC_CURA_LO 0x0a /* cur reg A low */
96#define UDC_CURB_HI 0x12 /* cur reg B high */
97#define UDC_CURB_LO 0x02 /* cur reg B low */
98#define UDC_MODE_HI 0x56 /* mode reg high */
99#define UDC_MODE_LO 0x52 /* mode reg low */
100#define UDC_COUNT 0x32 /* words to xfer */
101
102/* some udc commands */
103#define UDC_RESET 0
104#define UDC_CHN_START 0xa0 /* start chain */
105#define UDC_INT_ENABLE 0x32 /* channel 1 int on */
106
107/* udc mode words */
108#define UDC_MODE_HIWORD 0x40
109#define UDC_MODE_LSEND 0xc2
110#define UDC_MODE_LRECV 0xd2
111
112/* udc reg selections */
113#define UDC_RSEL_SEND 0x282
114#define UDC_RSEL_RECV 0x182
115
116/* bits in csr reg */
117#define CSR_DMA_ACTIVE 0x8000
118#define CSR_DMA_CONFLICT 0x4000
119#define CSR_DMA_BUSERR 0x2000
120
121#define CSR_FIFO_EMPTY 0x400 /* fifo flushed? */
122#define CSR_SDB_INT 0x200 /* sbc interrupt pending */
123#define CSR_DMA_INT 0x100 /* dma interrupt pending */
124
125#define CSR_LEFT 0xc0
126#define CSR_LEFT_3 0xc0
127#define CSR_LEFT_2 0x80
128#define CSR_LEFT_1 0x40
129#define CSR_PACK_ENABLE 0x20
130
131#define CSR_DMA_ENABLE 0x10
132
133#define CSR_SEND 0x8 /* 1 = send 0 = recv */
134#define CSR_FIFO 0x2 /* reset fifo */
135#define CSR_INTR 0x4 /* interrupt enable */
136#define CSR_SCSI 0x1
137
138#define VME_DATA24 0x3d00
64 139
65extern int sun3_map_test(unsigned long, char *); 140extern int sun3_map_test(unsigned long, char *);
66 141
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
deleted file mode 100644
index d22745fae328..000000000000
--- a/drivers/scsi/sun3_scsi.h
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl)
3 *
4 * Sun3 DMA additions by Sam Creasey (sammy@sammy.net)
5 *
6 * Adapted from mac_scsinew.h:
7 */
8/*
9 * Cumana Generic NCR5380 driver defines
10 *
11 * Copyright 1993, Drew Eckhardt
12 * Visionary Computing
13 * (Unix and Linux consulting and custom programming)
14 * drew@colorado.edu
15 * +1 (303) 440-4894
16 */
17
18#ifndef SUN3_SCSI_H
19#define SUN3_SCSI_H
20
21/* additional registers - mainly DMA control regs */
22/* these start at regbase + 8 -- directly after the NCR regs */
23struct sun3_dma_regs {
24 unsigned short dma_addr_hi; /* vme only */
25 unsigned short dma_addr_lo; /* vme only */
26 unsigned short dma_count_hi; /* vme only */
27 unsigned short dma_count_lo; /* vme only */
28 unsigned short udc_data; /* udc dma data reg (obio only) */
29 unsigned short udc_addr; /* uda dma addr reg (obio only) */
30 unsigned short fifo_data; /* fifo data reg, holds extra byte on
31 odd dma reads */
32 unsigned short fifo_count;
33 unsigned short csr; /* control/status reg */
34 unsigned short bpack_hi; /* vme only */
35 unsigned short bpack_lo; /* vme only */
36 unsigned short ivect; /* vme only */
37 unsigned short fifo_count_hi; /* vme only */
38};
39
40/* ucd chip specific regs - live in dvma space */
41struct sun3_udc_regs {
42 unsigned short rsel; /* select regs to load */
43 unsigned short addr_hi; /* high word of addr */
44 unsigned short addr_lo; /* low word */
45 unsigned short count; /* words to be xfer'd */
46 unsigned short mode_hi; /* high word of channel mode */
47 unsigned short mode_lo; /* low word of channel mode */
48};
49
50/* addresses of the udc registers */
51#define UDC_MODE 0x38
52#define UDC_CSR 0x2e /* command/status */
53#define UDC_CHN_HI 0x26 /* chain high word */
54#define UDC_CHN_LO 0x22 /* chain lo word */
55#define UDC_CURA_HI 0x1a /* cur reg A high */
56#define UDC_CURA_LO 0x0a /* cur reg A low */
57#define UDC_CURB_HI 0x12 /* cur reg B high */
58#define UDC_CURB_LO 0x02 /* cur reg B low */
59#define UDC_MODE_HI 0x56 /* mode reg high */
60#define UDC_MODE_LO 0x52 /* mode reg low */
61#define UDC_COUNT 0x32 /* words to xfer */
62
63/* some udc commands */
64#define UDC_RESET 0
65#define UDC_CHN_START 0xa0 /* start chain */
66#define UDC_INT_ENABLE 0x32 /* channel 1 int on */
67
68/* udc mode words */
69#define UDC_MODE_HIWORD 0x40
70#define UDC_MODE_LSEND 0xc2
71#define UDC_MODE_LRECV 0xd2
72
73/* udc reg selections */
74#define UDC_RSEL_SEND 0x282
75#define UDC_RSEL_RECV 0x182
76
77/* bits in csr reg */
78#define CSR_DMA_ACTIVE 0x8000
79#define CSR_DMA_CONFLICT 0x4000
80#define CSR_DMA_BUSERR 0x2000
81
82#define CSR_FIFO_EMPTY 0x400 /* fifo flushed? */
83#define CSR_SDB_INT 0x200 /* sbc interrupt pending */
84#define CSR_DMA_INT 0x100 /* dma interrupt pending */
85
86#define CSR_LEFT 0xc0
87#define CSR_LEFT_3 0xc0
88#define CSR_LEFT_2 0x80
89#define CSR_LEFT_1 0x40
90#define CSR_PACK_ENABLE 0x20
91
92#define CSR_DMA_ENABLE 0x10
93
94#define CSR_SEND 0x8 /* 1 = send 0 = recv */
95#define CSR_FIFO 0x2 /* reset fifo */
96#define CSR_INTR 0x4 /* interrupt enable */
97#define CSR_SCSI 0x1
98
99#define VME_DATA24 0x3d00
100
101#endif /* SUN3_SCSI_H */
102
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index abe617372661..ce5d023c1c91 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1497,17 +1497,21 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
1497 1497
1498static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) 1498static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
1499{ 1499{
1500 if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) 1500 if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
1501 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
1502 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
1501 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); 1503 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
1502 else 1504 } else {
1505 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
1503 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); 1506 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
1507 }
1504} 1508}
1505 1509
1506static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) 1510static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
1507{ 1511{
1508 /* provide a legal default configuration */ 1512 /* provide a legal default configuration */
1509 host->testbus.select_major = TSTBUS_UAWM; 1513 host->testbus.select_major = TSTBUS_UNIPRO;
1510 host->testbus.select_minor = 1; 1514 host->testbus.select_minor = 37;
1511} 1515}
1512 1516
1513static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) 1517static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
@@ -1524,7 +1528,7 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
1524 * mappings of select_minor, since there is no harm in 1528 * mappings of select_minor, since there is no harm in
1525 * configuring a non-existent select_minor 1529 * configuring a non-existent select_minor
1526 */ 1530 */
1527 if (host->testbus.select_minor > 0x1F) { 1531 if (host->testbus.select_minor > 0xFF) {
1528 dev_err(host->hba->dev, 1532 dev_err(host->hba->dev,
1529 "%s: 0x%05X is not a legal testbus option\n", 1533 "%s: 0x%05X is not a legal testbus option\n",
1530 __func__, host->testbus.select_minor); 1534 __func__, host->testbus.select_minor);
@@ -1593,7 +1597,8 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1593 break; 1597 break;
1594 case TSTBUS_UNIPRO: 1598 case TSTBUS_UNIPRO:
1595 reg = UFS_UNIPRO_CFG; 1599 reg = UFS_UNIPRO_CFG;
1596 offset = 1; 1600 offset = 20;
1601 mask = 0xFFF;
1597 break; 1602 break;
1598 /* 1603 /*
1599 * No need for a default case, since 1604 * No need for a default case, since
@@ -1612,6 +1617,11 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1612 (u32)host->testbus.select_minor << offset, 1617 (u32)host->testbus.select_minor << offset,
1613 reg); 1618 reg);
1614 ufs_qcom_enable_test_bus(host); 1619 ufs_qcom_enable_test_bus(host);
1620 /*
1621 * Make sure the test bus configuration is
1622 * committed before returning.
1623 */
1624 mb();
1615 ufshcd_release(host->hba); 1625 ufshcd_release(host->hba);
1616 pm_runtime_put_sync(host->hba->dev); 1626 pm_runtime_put_sync(host->hba->dev);
1617 1627
@@ -1623,13 +1633,39 @@ static void ufs_qcom_testbus_read(struct ufs_hba *hba)
1623 ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS "); 1633 ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
1624} 1634}
1625 1635
1636static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
1637{
1638 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1639 u32 *testbus = NULL;
1640 int i, nminor = 256, testbus_len = nminor * sizeof(u32);
1641
1642 testbus = kmalloc(testbus_len, GFP_KERNEL);
1643 if (!testbus)
1644 return;
1645
1646 host->testbus.select_major = TSTBUS_UNIPRO;
1647 for (i = 0; i < nminor; i++) {
1648 host->testbus.select_minor = i;
1649 ufs_qcom_testbus_config(host);
1650 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
1651 }
1652 print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
1653 16, 4, testbus, testbus_len, false);
1654 kfree(testbus);
1655}
1656
1626static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) 1657static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1627{ 1658{
1628 ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, 1659 ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
1629 "HCI Vendor Specific Registers "); 1660 "HCI Vendor Specific Registers ");
1630 1661
1662 /* sleep a bit intermittently as we are dumping too much data */
1631 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); 1663 ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
1664 usleep_range(1000, 1100);
1632 ufs_qcom_testbus_read(hba); 1665 ufs_qcom_testbus_read(hba);
1666 usleep_range(1000, 1100);
1667 ufs_qcom_print_unipro_testbus(hba);
1668 usleep_range(1000, 1100);
1633} 1669}
1634 1670
1635/** 1671/**
@@ -1692,6 +1728,7 @@ static const struct of_device_id ufs_qcom_of_match[] = {
1692 { .compatible = "qcom,ufshc"}, 1728 { .compatible = "qcom,ufshc"},
1693 {}, 1729 {},
1694}; 1730};
1731MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
1695 1732
1696static const struct dev_pm_ops ufs_qcom_pm_ops = { 1733static const struct dev_pm_ops ufs_qcom_pm_ops = {
1697 .suspend = ufshcd_pltfrm_suspend, 1734 .suspend = ufshcd_pltfrm_suspend,
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index fe517cd7dac3..076f52813a4c 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -95,6 +95,7 @@ enum {
95#define QUNIPRO_SEL UFS_BIT(0) 95#define QUNIPRO_SEL UFS_BIT(0)
96#define TEST_BUS_EN BIT(18) 96#define TEST_BUS_EN BIT(18)
97#define TEST_BUS_SEL GENMASK(22, 19) 97#define TEST_BUS_SEL GENMASK(22, 19)
98#define UFS_REG_TEST_BUS_EN BIT(30)
98 99
99/* bit definitions for REG_UFS_CFG2 register */ 100/* bit definitions for REG_UFS_CFG2 register */
100#define UAWM_HW_CGC_EN (1 << 0) 101#define UAWM_HW_CGC_EN (1 << 0)
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 8e6709a3fb6b..318e4a1f76c9 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -523,4 +523,16 @@ struct ufs_dev_info {
523 bool is_lu_power_on_wp; 523 bool is_lu_power_on_wp;
524}; 524};
525 525
526#define MAX_MODEL_LEN 16
527/**
528 * ufs_dev_desc - ufs device details from the device descriptor
529 *
530 * @wmanufacturerid: card details
531 * @model: card model
532 */
533struct ufs_dev_desc {
534 u16 wmanufacturerid;
535 char model[MAX_MODEL_LEN + 1];
536};
537
526#endif /* End of Header */ 538#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 08b799d4efcc..71f73d1d1ad1 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -21,41 +21,28 @@
21#define UFS_ANY_VENDOR 0xFFFF 21#define UFS_ANY_VENDOR 0xFFFF
22#define UFS_ANY_MODEL "ANY_MODEL" 22#define UFS_ANY_MODEL "ANY_MODEL"
23 23
24#define MAX_MODEL_LEN 16
25
26#define UFS_VENDOR_TOSHIBA 0x198 24#define UFS_VENDOR_TOSHIBA 0x198
27#define UFS_VENDOR_SAMSUNG 0x1CE 25#define UFS_VENDOR_SAMSUNG 0x1CE
28#define UFS_VENDOR_SKHYNIX 0x1AD 26#define UFS_VENDOR_SKHYNIX 0x1AD
29 27
30/** 28/**
31 * ufs_device_info - ufs device details
32 * @wmanufacturerid: card details
33 * @model: card model
34 */
35struct ufs_device_info {
36 u16 wmanufacturerid;
37 char model[MAX_MODEL_LEN + 1];
38};
39
40/**
41 * ufs_dev_fix - ufs device quirk info 29 * ufs_dev_fix - ufs device quirk info
42 * @card: ufs card details 30 * @card: ufs card details
43 * @quirk: device quirk 31 * @quirk: device quirk
44 */ 32 */
45struct ufs_dev_fix { 33struct ufs_dev_fix {
46 struct ufs_device_info card; 34 struct ufs_dev_desc card;
47 unsigned int quirk; 35 unsigned int quirk;
48}; 36};
49 37
50#define END_FIX { { 0 }, 0 } 38#define END_FIX { { 0 }, 0 }
51 39
52/* add specific device quirk */ 40/* add specific device quirk */
53#define UFS_FIX(_vendor, _model, _quirk) \ 41#define UFS_FIX(_vendor, _model, _quirk) { \
54 { \ 42 .card.wmanufacturerid = (_vendor),\
55 .card.wmanufacturerid = (_vendor),\ 43 .card.model = (_model), \
56 .card.model = (_model), \ 44 .quirk = (_quirk), \
57 .quirk = (_quirk), \ 45}
58 }
59 46
60/* 47/*
61 * If the UFS device has issues processing LCC (Line Control 48
@@ -144,7 +131,4 @@ struct ufs_dev_fix {
144 */ 131 */
145#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8) 132#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
146 133
147struct ufs_hba;
148void ufs_advertise_fixup_device(struct ufs_hba *hba);
149
150#endif /* UFS_QUIRKS_H_ */ 134#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 20e5e5fb048c..8b721f431dd0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -45,6 +45,9 @@
45#include "ufs_quirks.h" 45#include "ufs_quirks.h"
46#include "unipro.h" 46#include "unipro.h"
47 47
48#define CREATE_TRACE_POINTS
49#include <trace/events/ufs.h>
50
48#define UFSHCD_REQ_SENSE_SIZE 18 51#define UFSHCD_REQ_SENSE_SIZE 18
49 52
50#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ 53#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
@@ -94,6 +97,9 @@
94 _ret; \ 97 _ret; \
95 }) 98 })
96 99
100#define ufshcd_hex_dump(prefix_str, buf, len) \
101print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
102
97static u32 ufs_query_desc_max_size[] = { 103static u32 ufs_query_desc_max_size[] = {
98 QUERY_DESC_DEVICE_MAX_SIZE, 104 QUERY_DESC_DEVICE_MAX_SIZE,
99 QUERY_DESC_CONFIGURAION_MAX_SIZE, 105 QUERY_DESC_CONFIGURAION_MAX_SIZE,
@@ -185,6 +191,22 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
185 return ufs_pm_lvl_states[lvl].link_state; 191 return ufs_pm_lvl_states[lvl].link_state;
186} 192}
187 193
194static inline enum ufs_pm_level
195ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
196 enum uic_link_state link_state)
197{
198 enum ufs_pm_level lvl;
199
200 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
201 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
202 (ufs_pm_lvl_states[lvl].link_state == link_state))
203 return lvl;
204 }
205
206 /* if no match is found, return level 0 */
207 return UFS_PM_LVL_0;
208}
209
188static struct ufs_dev_fix ufs_fixups[] = { 210static struct ufs_dev_fix ufs_fixups[] = {
189 /* UFS cards deviations table */ 211 /* UFS cards deviations table */
190 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, 212 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
@@ -212,6 +234,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
212static void ufshcd_tmc_handler(struct ufs_hba *hba); 234static void ufshcd_tmc_handler(struct ufs_hba *hba);
213static void ufshcd_async_scan(void *data, async_cookie_t cookie); 235static void ufshcd_async_scan(void *data, async_cookie_t cookie);
214static int ufshcd_reset_and_restore(struct ufs_hba *hba); 236static int ufshcd_reset_and_restore(struct ufs_hba *hba);
237static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
215static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); 238static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
216static void ufshcd_hba_exit(struct ufs_hba *hba); 239static void ufshcd_hba_exit(struct ufs_hba *hba);
217static int ufshcd_probe_hba(struct ufs_hba *hba); 240static int ufshcd_probe_hba(struct ufs_hba *hba);
@@ -223,6 +246,10 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
223static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); 246static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
224static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); 247static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
225static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); 248static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
249static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
250static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
251static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
252static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
226static irqreturn_t ufshcd_intr(int irq, void *__hba); 253static irqreturn_t ufshcd_intr(int irq, void *__hba);
227static int ufshcd_config_pwr_mode(struct ufs_hba *hba, 254static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
228 struct ufs_pa_layer_attr *desired_pwr_mode); 255 struct ufs_pa_layer_attr *desired_pwr_mode);
@@ -267,6 +294,214 @@ static inline void ufshcd_remove_non_printable(char *val)
267 *val = ' '; 294 *val = ' ';
268} 295}
269 296
297static void ufshcd_add_command_trace(struct ufs_hba *hba,
298 unsigned int tag, const char *str)
299{
300 sector_t lba = -1;
301 u8 opcode = 0;
302 u32 intr, doorbell;
303 struct ufshcd_lrb *lrbp;
304 int transfer_len = -1;
305
306 if (!trace_ufshcd_command_enabled())
307 return;
308
309 lrbp = &hba->lrb[tag];
310
311 if (lrbp->cmd) { /* data phase exists */
312 opcode = (u8)(*lrbp->cmd->cmnd);
313 if ((opcode == READ_10) || (opcode == WRITE_10)) {
314 /*
315 * Currently we only fully trace read(10) and write(10)
316 * commands
317 */
318 if (lrbp->cmd->request && lrbp->cmd->request->bio)
319 lba =
320 lrbp->cmd->request->bio->bi_iter.bi_sector;
321 transfer_len = be32_to_cpu(
322 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
323 }
324 }
325
326 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
327 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
328 trace_ufshcd_command(dev_name(hba->dev), str, tag,
329 doorbell, transfer_len, intr, lba, opcode);
330}
331
332static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
333{
334 struct ufs_clk_info *clki;
335 struct list_head *head = &hba->clk_list_head;
336
337 if (!head || list_empty(head))
338 return;
339
340 list_for_each_entry(clki, head, list) {
341 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
342 clki->max_freq)
343 dev_err(hba->dev, "clk: %s, rate: %u\n",
344 clki->name, clki->curr_freq);
345 }
346}
347
348static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
349 struct ufs_uic_err_reg_hist *err_hist, char *err_name)
350{
351 int i;
352
353 for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
354 int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
355
356 if (err_hist->reg[p] == 0)
357 continue;
358 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
359 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
360 }
361}
362
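ufshcd_print_uic_err_hist() walks a small ring buffer: pos is the next slot to be written, so indexing with (i + pos - 1) % LEN starts at the most recent entry and wraps through the whole history, skipping never-written slots. A self-contained sketch of the same walk (the + HIST_LEN term keeps the index non-negative in plain C; sizes and values are made up):

#include <stdio.h>

#define HIST_LEN 8

struct err_hist {
	unsigned int reg[HIST_LEN];	/* 0 means "slot unused" */
	int pos;			/* next slot to be written */
};

static void record(struct err_hist *h, unsigned int val)
{
	h->reg[h->pos] = val;
	h->pos = (h->pos + 1) % HIST_LEN;
}

static void print_hist(const struct err_hist *h)
{
	for (int i = 0; i < HIST_LEN; i++) {
		int p = (i + h->pos - 1 + HIST_LEN) % HIST_LEN;

		if (h->reg[p] == 0)
			continue;	/* slot never written */
		printf("err[%d] = 0x%x\n", i, h->reg[p]);
	}
}

int main(void)
{
	struct err_hist h = { { 0 }, 0 };

	record(&h, 0x11);
	record(&h, 0x22);
	record(&h, 0x33);
	print_hist(&h);		/* most recent entry first, then wraps */
	return 0;
}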
363static void ufshcd_print_host_regs(struct ufs_hba *hba)
364{
365 /*
366 * hex_dump reads its data without the readl macro. This might
367 * cause inconsistency issues on some platforms, as the printed
368 * values may come from the cache and not be the most recent ones.
369 * To confirm you are looking at an uncached mapping, verify that
370 * the IORESOURCE_MEM flag is set when xxx_get_resource() is
371 * invoked during the platform/pci probe function.
372 */
373 ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
374 dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
375 hba->ufs_version, hba->capabilities);
376 dev_err(hba->dev,
377 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
378 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
379 dev_err(hba->dev,
380 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
381 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
382 hba->ufs_stats.hibern8_exit_cnt);
383
384 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
385 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
386 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
387 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
388 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
389
390 ufshcd_print_clk_freqs(hba);
391
392 if (hba->vops && hba->vops->dbg_register_dump)
393 hba->vops->dbg_register_dump(hba);
394}
395
396static
397void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
398{
399 struct ufshcd_lrb *lrbp;
400 int prdt_length;
401 int tag;
402
403 for_each_set_bit(tag, &bitmap, hba->nutrs) {
404 lrbp = &hba->lrb[tag];
405
406 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
407 tag, ktime_to_us(lrbp->issue_time_stamp));
408 dev_err(hba->dev,
409 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
410 tag, (u64)lrbp->utrd_dma_addr);
411
412 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
413 sizeof(struct utp_transfer_req_desc));
414 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
415 (u64)lrbp->ucd_req_dma_addr);
416 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
417 sizeof(struct utp_upiu_req));
418 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
419 (u64)lrbp->ucd_rsp_dma_addr);
420 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
421 sizeof(struct utp_upiu_rsp));
422
423 prdt_length = le16_to_cpu(
424 lrbp->utr_descriptor_ptr->prd_table_length);
425 dev_err(hba->dev,
426 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
427 tag, prdt_length,
428 (u64)lrbp->ucd_prdt_dma_addr);
429
430 if (pr_prdt)
431 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
432 sizeof(struct ufshcd_sg_entry) * prdt_length);
433 }
434}
435
436static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
437{
438 struct utp_task_req_desc *tmrdp;
439 int tag;
440
441 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
442 tmrdp = &hba->utmrdl_base_addr[tag];
443 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
444 ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
445 sizeof(struct request_desc_header));
446 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
447 tag);
448 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
449 sizeof(struct utp_upiu_req));
450 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
451 tag);
452 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
453 sizeof(struct utp_task_req_desc));
454 }
455}
456
457static void ufshcd_print_host_state(struct ufs_hba *hba)
458{
459 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
460 dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
461 hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
462 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
463 hba->saved_err, hba->saved_uic_err);
464 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
465 hba->curr_dev_pwr_mode, hba->uic_link_state);
466 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
467 hba->pm_op_in_progress, hba->is_sys_suspended);
468 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
469 hba->auto_bkops_enabled, hba->host->host_self_blocked);
470 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
471 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
472 hba->eh_flags, hba->req_abort_count);
473 dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
474 hba->capabilities, hba->caps);
475 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
476 hba->dev_quirks);
477}
478
479/**
480 * ufshcd_print_pwr_info - print power params as saved in hba
481 * power info
482 * @hba: per-adapter instance
483 */
484static void ufshcd_print_pwr_info(struct ufs_hba *hba)
485{
486 static const char * const names[] = {
487 "INVALID MODE",
488 "FAST MODE",
489 "SLOW_MODE",
490 "INVALID MODE",
491 "FASTAUTO_MODE",
492 "SLOWAUTO_MODE",
493 "INVALID MODE",
494 };
495
496 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
497 __func__,
498 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
499 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
500 names[hba->pwr_info.pwr_rx],
501 names[hba->pwr_info.pwr_tx],
502 hba->pwr_info.hs_rate);
503}
504
270/* 505/*
271 * ufshcd_wait_for_register - wait for register value to change 506 * ufshcd_wait_for_register - wait for register value to change
272 * @hba - per-adapter interface 507 * @hba - per-adapter interface
@@ -605,6 +840,28 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
605 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; 840 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
606} 841}
607 842
843static const char *ufschd_uic_link_state_to_string(
844 enum uic_link_state state)
845{
846 switch (state) {
847 case UIC_LINK_OFF_STATE: return "OFF";
848 case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
849 case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
850 default: return "UNKNOWN";
851 }
852}
853
854static const char *ufschd_ufs_dev_pwr_mode_to_string(
855 enum ufs_dev_pwr_mode state)
856{
857 switch (state) {
858 case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
859 case UFS_SLEEP_PWR_MODE: return "SLEEP";
860 case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
861 default: return "UNKNOWN";
862 }
863}
864
608u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) 865u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
609{ 866{
610 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */ 867 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
@@ -633,20 +890,523 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
633 return false; 890 return false;
634} 891}
635 892
893static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
894{
895 int ret = 0;
896 struct ufs_clk_info *clki;
897 struct list_head *head = &hba->clk_list_head;
898 ktime_t start = ktime_get();
899 bool clk_state_changed = false;
900
901 if (!head || list_empty(head))
902 goto out;
903
904 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
905 if (ret)
906 return ret;
907
908 list_for_each_entry(clki, head, list) {
909 if (!IS_ERR_OR_NULL(clki->clk)) {
910 if (scale_up && clki->max_freq) {
911 if (clki->curr_freq == clki->max_freq)
912 continue;
913
914 clk_state_changed = true;
915 ret = clk_set_rate(clki->clk, clki->max_freq);
916 if (ret) {
917 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
918 __func__, clki->name,
919 clki->max_freq, ret);
920 break;
921 }
922 trace_ufshcd_clk_scaling(dev_name(hba->dev),
923 "scaled up", clki->name,
924 clki->curr_freq,
925 clki->max_freq);
926
927 clki->curr_freq = clki->max_freq;
928
929 } else if (!scale_up && clki->min_freq) {
930 if (clki->curr_freq == clki->min_freq)
931 continue;
932
933 clk_state_changed = true;
934 ret = clk_set_rate(clki->clk, clki->min_freq);
935 if (ret) {
936 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
937 __func__, clki->name,
938 clki->min_freq, ret);
939 break;
940 }
941 trace_ufshcd_clk_scaling(dev_name(hba->dev),
942 "scaled down", clki->name,
943 clki->curr_freq,
944 clki->min_freq);
945 clki->curr_freq = clki->min_freq;
946 }
947 }
948 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
949 clki->name, clk_get_rate(clki->clk));
950 }
951
952 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
953
954out:
955 if (clk_state_changed)
956 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
957 (scale_up ? "up" : "down"),
958 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
959 return ret;
960}
961
962/**
963 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
964 * @hba: per adapter instance
965 * @scale_up: True if scaling up and false if scaling down
966 *
967 * Returns true if scaling is required, false otherwise.
968 */
969static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
970 bool scale_up)
971{
972 struct ufs_clk_info *clki;
973 struct list_head *head = &hba->clk_list_head;
974
975 if (!head || list_empty(head))
976 return false;
977
978 list_for_each_entry(clki, head, list) {
979 if (!IS_ERR_OR_NULL(clki->clk)) {
980 if (scale_up && clki->max_freq) {
981 if (clki->curr_freq == clki->max_freq)
982 continue;
983 return true;
984 } else if (!scale_up && clki->min_freq) {
985 if (clki->curr_freq == clki->min_freq)
986 continue;
987 return true;
988 }
989 }
990 }
991
992 return false;
993}
994
995static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
996 u64 wait_timeout_us)
997{
998 unsigned long flags;
999 int ret = 0;
1000 u32 tm_doorbell;
1001 u32 tr_doorbell;
1002 bool timeout = false, do_last_check = false;
1003 ktime_t start;
1004
1005 ufshcd_hold(hba, false);
1006 spin_lock_irqsave(hba->host->host_lock, flags);
1007 /*
1008 * Wait for all the outstanding tasks/transfer requests.
1009 * Verify by checking that the doorbell registers are clear.
1010 */
1011 start = ktime_get();
1012 do {
1013 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1014 ret = -EBUSY;
1015 goto out;
1016 }
1017
1018 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1019 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1020 if (!tm_doorbell && !tr_doorbell) {
1021 timeout = false;
1022 break;
1023 } else if (do_last_check) {
1024 break;
1025 }
1026
1027 spin_unlock_irqrestore(hba->host->host_lock, flags);
1028 schedule();
1029 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1030 wait_timeout_us) {
1031 timeout = true;
1032 /*
1033 * We might have been scheduled out for a long time, so
1034 * make sure to check whether the doorbells have cleared
1035 * by now.
1036 */
1037 do_last_check = true;
1038 }
1039 spin_lock_irqsave(hba->host->host_lock, flags);
1040 } while (tm_doorbell || tr_doorbell);
1041
1042 if (timeout) {
1043 dev_err(hba->dev,
1044 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1045 __func__, tm_doorbell, tr_doorbell);
1046 ret = -EBUSY;
1047 }
1048out:
1049 spin_unlock_irqrestore(hba->host->host_lock, flags);
1050 ufshcd_release(hba);
1051 return ret;
1052}
1053
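The do_last_check dance above guards against a scheduling artifact: the task may sleep past the deadline while the doorbells actually drained, so one more read is taken after the timeout fires before declaring failure. A minimal user-space sketch of that polling pattern (hw_busy() is a stand-in for the doorbell reads and is purely illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for "read the doorbell registers"; illustrative only. */
static bool hw_busy(void)
{
	static int remaining = 3;
	return remaining-- > 0;
}

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/*
 * Poll until idle; once the deadline passes, take exactly one more
 * sample before giving up, since we may have slept well past the
 * deadline while the hardware actually went idle.
 */
static int wait_for_idle(long long timeout_us)
{
	long long start = now_us();
	bool do_last_check = false;

	for (;;) {
		if (!hw_busy())
			return 0;		/* drained */
		if (do_last_check)
			return -1;		/* really timed out */
		if (now_us() - start > timeout_us)
			do_last_check = true;	/* one final sample */
	}
}

int main(void)
{
	printf("wait_for_idle: %d\n", wait_for_idle(1000));
	return 0;
}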
1054/**
1055 * ufshcd_scale_gear - scale up/down UFS gear
1056 * @hba: per adapter instance
1057 * @scale_up: True for scaling up gear and false for scaling down
1058 *
1059 * Returns 0 for success,
1060 * Returns -EBUSY if scaling can't happen at this time
1061 * Returns non-zero for any other errors
1062 */
1063static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1064{
1065 #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
1066 int ret = 0;
1067 struct ufs_pa_layer_attr new_pwr_info;
1068
1069 if (scale_up) {
1070 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1071 sizeof(struct ufs_pa_layer_attr));
1072 } else {
1073 memcpy(&new_pwr_info, &hba->pwr_info,
1074 sizeof(struct ufs_pa_layer_attr));
1075
1076 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1077 || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1078 /* save the current power mode */
1079 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1080 &hba->pwr_info,
1081 sizeof(struct ufs_pa_layer_attr));
1082
1083 /* scale down gear */
1084 new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1085 new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1086 }
1087 }
1088
1089 /* check if the power mode needs to be changed */
1090 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1091
1092 if (ret)
1093 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1094 __func__, ret,
1095 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1096 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1097
1098 return ret;
1099}
1100
1101static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1102{
1103 #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
1104 int ret = 0;
1105 /*
1106 * make sure that there are no outstanding requests when
1107 * clock scaling is in progress
1108 */
1109 scsi_block_requests(hba->host);
1110 down_write(&hba->clk_scaling_lock);
1111 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1112 ret = -EBUSY;
1113 up_write(&hba->clk_scaling_lock);
1114 scsi_unblock_requests(hba->host);
1115 }
1116
1117 return ret;
1118}
1119
1120static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1121{
1122 up_write(&hba->clk_scaling_lock);
1123 scsi_unblock_requests(hba->host);
1124}
1125
1126/**
1127 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1128 * @hba: per adapter instance
1129 * @scale_up: True for scaling up and false for scaling down
1130 *
1131 * Returns 0 for success,
1132 * Returns -EBUSY if scaling can't happen at this time
1133 * Returns non-zero for any other errors
1134 */
1135static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1136{
1137 int ret = 0;
1138
1139 /* let's not get into low power until clock scaling is completed */
1140 ufshcd_hold(hba, false);
1141
1142 ret = ufshcd_clock_scaling_prepare(hba);
1143 if (ret)
1144 return ret;
1145
1146 /* scale down the gear before scaling down clocks */
1147 if (!scale_up) {
1148 ret = ufshcd_scale_gear(hba, false);
1149 if (ret)
1150 goto out;
1151 }
1152
1153 ret = ufshcd_scale_clks(hba, scale_up);
1154 if (ret) {
1155 if (!scale_up)
1156 ufshcd_scale_gear(hba, true);
1157 goto out;
1158 }
1159
1160 /* scale up the gear after scaling up clocks */
1161 if (scale_up) {
1162 ret = ufshcd_scale_gear(hba, true);
1163 if (ret) {
1164 ufshcd_scale_clks(hba, false);
1165 goto out;
1166 }
1167 }
1168
1169 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1170
1171out:
1172 ufshcd_clock_scaling_unprepare(hba);
1173 ufshcd_release(hba);
1174 return ret;
1175}
1176
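The ordering in ufshcd_devfreq_scale() keeps the link gear within what the clocks can sustain: the gear comes down before the clocks when scaling down, the clocks come up before the gear when scaling up, and the half already applied is rolled back if the other half fails. A compact sketch of that ordering (scale_gear()/scale_clks() are stand-ins, not the driver functions):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the real gear/clock operations; illustrative only. */
static int scale_gear(bool up) { printf("gear %s\n", up ? "up" : "down"); return 0; }
static int scale_clks(bool up) { printf("clks %s\n", up ? "up" : "down"); return 0; }

/*
 * The gear must never outrun the clocks: drop the gear first on the
 * way down, raise the clocks first on the way up, and undo the first
 * step if the second one fails.
 */
static int devfreq_scale(bool scale_up)
{
	int ret;

	if (!scale_up) {
		ret = scale_gear(false);
		if (ret)
			return ret;
	}

	ret = scale_clks(scale_up);
	if (ret) {
		if (!scale_up)
			scale_gear(true);	/* roll back gear change */
		return ret;
	}

	if (scale_up) {
		ret = scale_gear(true);
		if (ret)
			scale_clks(false);	/* roll back clock change */
	}
	return ret;
}

int main(void)
{
	devfreq_scale(true);
	devfreq_scale(false);
	return 0;
}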
1177static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1178{
1179 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1180 clk_scaling.suspend_work);
1181 unsigned long irq_flags;
1182
1183 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1184 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1185 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1186 return;
1187 }
1188 hba->clk_scaling.is_suspended = true;
1189 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1190
1191 __ufshcd_suspend_clkscaling(hba);
1192}
1193
1194static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1195{
1196 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1197 clk_scaling.resume_work);
1198 unsigned long irq_flags;
1199
1200 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1201 if (!hba->clk_scaling.is_suspended) {
1202 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1203 return;
1204 }
1205 hba->clk_scaling.is_suspended = false;
1206 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1207
1208 devfreq_resume_device(hba->devfreq);
1209}
1210
1211static int ufshcd_devfreq_target(struct device *dev,
1212 unsigned long *freq, u32 flags)
1213{
1214 int ret = 0;
1215 struct ufs_hba *hba = dev_get_drvdata(dev);
1216 ktime_t start;
1217 bool scale_up, sched_clk_scaling_suspend_work = false;
1218 unsigned long irq_flags;
1219
1220 if (!ufshcd_is_clkscaling_supported(hba))
1221 return -EINVAL;
1222
1223 if ((*freq > 0) && (*freq < UINT_MAX)) {
1224 dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
1225 return -EINVAL;
1226 }
1227
1228 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1229 if (ufshcd_eh_in_progress(hba)) {
1230 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1231 return 0;
1232 }
1233
1234 if (!hba->clk_scaling.active_reqs)
1235 sched_clk_scaling_suspend_work = true;
1236
1237 scale_up = (*freq == UINT_MAX) ? true : false;
1238 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1239 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1240 ret = 0;
1241 goto out; /* no state change required */
1242 }
1243 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1244
1245 start = ktime_get();
1246 ret = ufshcd_devfreq_scale(hba, scale_up);
1247
1248 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1249 (scale_up ? "up" : "down"),
1250 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1251
1252out:
1253 if (sched_clk_scaling_suspend_work)
1254 queue_work(hba->clk_scaling.workq,
1255 &hba->clk_scaling.suspend_work);
1256
1257 return ret;
1258}
1259
1260
1261static int ufshcd_devfreq_get_dev_status(struct device *dev,
1262 struct devfreq_dev_status *stat)
1263{
1264 struct ufs_hba *hba = dev_get_drvdata(dev);
1265 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1266 unsigned long flags;
1267
1268 if (!ufshcd_is_clkscaling_supported(hba))
1269 return -EINVAL;
1270
1271 memset(stat, 0, sizeof(*stat));
1272
1273 spin_lock_irqsave(hba->host->host_lock, flags);
1274 if (!scaling->window_start_t)
1275 goto start_window;
1276
1277 if (scaling->is_busy_started)
1278 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1279 scaling->busy_start_t));
1280
1281 stat->total_time = jiffies_to_usecs((long)jiffies -
1282 (long)scaling->window_start_t);
1283 stat->busy_time = scaling->tot_busy_t;
1284start_window:
1285 scaling->window_start_t = jiffies;
1286 scaling->tot_busy_t = 0;
1287
1288 if (hba->outstanding_reqs) {
1289 scaling->busy_start_t = ktime_get();
1290 scaling->is_busy_started = true;
1291 } else {
1292 scaling->busy_start_t = 0;
1293 scaling->is_busy_started = false;
1294 }
1295 spin_unlock_irqrestore(hba->host->host_lock, flags);
1296 return 0;
1297}
1298
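ufshcd_devfreq_get_dev_status() implements devfreq's load sampling as a rolling window: each call reports how much of the time since the previous call the controller had requests outstanding, then restarts the window. A self-contained sketch of that accounting (a hand-advanced microsecond clock stands in for jiffies/ktime so the example is deterministic):

#include <stdbool.h>
#include <stdio.h>

/* Simulated clock, advanced by hand; illustrative only. */
static long long g_now_us;

struct scaling {
	long long window_start;	/* when the current window opened */
	long long busy_start;	/* when the device last went busy */
	long long tot_busy;	/* busy time accumulated this window */
	bool is_busy;
};

static void set_busy(struct scaling *s, bool busy)
{
	if (busy && !s->is_busy)
		s->busy_start = g_now_us;
	else if (!busy && s->is_busy)
		s->tot_busy += g_now_us - s->busy_start;
	s->is_busy = busy;
}

/* Report load over the window since the last call, then restart it. */
static void get_dev_status(struct scaling *s, long long *total,
			   long long *busy)
{
	if (s->is_busy) {		/* close out the running busy span */
		s->tot_busy += g_now_us - s->busy_start;
		s->busy_start = g_now_us;
	}
	*total = g_now_us - s->window_start;
	*busy = s->tot_busy;
	s->window_start = g_now_us;	/* restart the window */
	s->tot_busy = 0;
}

int main(void)
{
	struct scaling s = { 0 };
	long long total, busy;

	set_busy(&s, true);
	g_now_us = 300;
	set_busy(&s, false);
	g_now_us = 1000;
	get_dev_status(&s, &total, &busy);
	printf("busy %lld of %lld us\n", busy, total);	/* 300 of 1000 */
	return 0;
}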
1299static struct devfreq_dev_profile ufs_devfreq_profile = {
1300 .polling_ms = 100,
1301 .target = ufshcd_devfreq_target,
1302 .get_dev_status = ufshcd_devfreq_get_dev_status,
1303};
1304
1305static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1306{
1307 unsigned long flags;
1308
1309 devfreq_suspend_device(hba->devfreq);
1310 spin_lock_irqsave(hba->host->host_lock, flags);
1311 hba->clk_scaling.window_start_t = 0;
1312 spin_unlock_irqrestore(hba->host->host_lock, flags);
1313}
1314
636static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) 1315static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
637{ 1316{
638 if (ufshcd_is_clkscaling_enabled(hba)) { 1317 unsigned long flags;
639 devfreq_suspend_device(hba->devfreq); 1318 bool suspend = false;
640 hba->clk_scaling.window_start_t = 0; 1319
1320 if (!ufshcd_is_clkscaling_supported(hba))
1321 return;
1322
1323 spin_lock_irqsave(hba->host->host_lock, flags);
1324 if (!hba->clk_scaling.is_suspended) {
1325 suspend = true;
1326 hba->clk_scaling.is_suspended = true;
641 } 1327 }
1328 spin_unlock_irqrestore(hba->host->host_lock, flags);
1329
1330 if (suspend)
1331 __ufshcd_suspend_clkscaling(hba);
642} 1332}
643 1333
644static void ufshcd_resume_clkscaling(struct ufs_hba *hba) 1334static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
645{ 1335{
646 if (ufshcd_is_clkscaling_enabled(hba)) 1336 unsigned long flags;
1337 bool resume = false;
1338
1339 if (!ufshcd_is_clkscaling_supported(hba))
1340 return;
1341
1342 spin_lock_irqsave(hba->host->host_lock, flags);
1343 if (hba->clk_scaling.is_suspended) {
1344 resume = true;
1345 hba->clk_scaling.is_suspended = false;
1346 }
1347 spin_unlock_irqrestore(hba->host->host_lock, flags);
1348
1349 if (resume)
647 devfreq_resume_device(hba->devfreq); 1350 devfreq_resume_device(hba->devfreq);
648} 1351}
649 1352
1353static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1354 struct device_attribute *attr, char *buf)
1355{
1356 struct ufs_hba *hba = dev_get_drvdata(dev);
1357
1358 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1359}
1360
1361static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1362 struct device_attribute *attr, const char *buf, size_t count)
1363{
1364 struct ufs_hba *hba = dev_get_drvdata(dev);
1365 u32 value;
1366 int err;
1367
1368 if (kstrtou32(buf, 0, &value))
1369 return -EINVAL;
1370
1371 value = !!value;
1372 if (value == hba->clk_scaling.is_allowed)
1373 goto out;
1374
1375 pm_runtime_get_sync(hba->dev);
1376 ufshcd_hold(hba, false);
1377
1378 cancel_work_sync(&hba->clk_scaling.suspend_work);
1379 cancel_work_sync(&hba->clk_scaling.resume_work);
1380
1381 hba->clk_scaling.is_allowed = value;
1382
1383 if (value) {
1384 ufshcd_resume_clkscaling(hba);
1385 } else {
1386 ufshcd_suspend_clkscaling(hba);
1387 err = ufshcd_devfreq_scale(hba, true);
1388 if (err)
1389 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1390 __func__, err);
1391 }
1392
1393 ufshcd_release(hba);
1394 pm_runtime_put_sync(hba->dev);
1395out:
1396 return count;
1397}
1398
1399static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1400{
1401 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1402 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1403 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1404 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1405 hba->clk_scaling.enable_attr.attr.mode = 0644;
1406 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1407 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1408}
1409
650static void ufshcd_ungate_work(struct work_struct *work) 1410static void ufshcd_ungate_work(struct work_struct *work)
651{ 1411{
652 int ret; 1412 int ret;
@@ -680,7 +1440,6 @@ static void ufshcd_ungate_work(struct work_struct *work)
680 hba->clk_gating.is_suspended = false; 1440 hba->clk_gating.is_suspended = false;
681 } 1441 }
682unblock_reqs: 1442unblock_reqs:
683 ufshcd_resume_clkscaling(hba);
684 scsi_unblock_requests(hba->host); 1443 scsi_unblock_requests(hba->host);
685} 1444}
686 1445
@@ -727,6 +1486,8 @@ start:
727 case REQ_CLKS_OFF: 1486 case REQ_CLKS_OFF:
728 if (cancel_delayed_work(&hba->clk_gating.gate_work)) { 1487 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
729 hba->clk_gating.state = CLKS_ON; 1488 hba->clk_gating.state = CLKS_ON;
1489 trace_ufshcd_clk_gating(dev_name(hba->dev),
1490 hba->clk_gating.state);
730 break; 1491 break;
731 } 1492 }
732 /* 1493 /*
@@ -737,6 +1498,8 @@ start:
737 case CLKS_OFF: 1498 case CLKS_OFF:
738 scsi_block_requests(hba->host); 1499 scsi_block_requests(hba->host);
739 hba->clk_gating.state = REQ_CLKS_ON; 1500 hba->clk_gating.state = REQ_CLKS_ON;
1501 trace_ufshcd_clk_gating(dev_name(hba->dev),
1502 hba->clk_gating.state);
740 schedule_work(&hba->clk_gating.ungate_work); 1503 schedule_work(&hba->clk_gating.ungate_work);
741 /* 1504 /*
742 * fall through to check if we should wait for this 1505 * fall through to check if we should wait for this
@@ -781,6 +1544,8 @@ static void ufshcd_gate_work(struct work_struct *work)
781 if (hba->clk_gating.is_suspended || 1544 if (hba->clk_gating.is_suspended ||
782 (hba->clk_gating.state == REQ_CLKS_ON)) { 1545 (hba->clk_gating.state == REQ_CLKS_ON)) {
783 hba->clk_gating.state = CLKS_ON; 1546 hba->clk_gating.state = CLKS_ON;
1547 trace_ufshcd_clk_gating(dev_name(hba->dev),
1548 hba->clk_gating.state);
784 goto rel_lock; 1549 goto rel_lock;
785 } 1550 }
786 1551
@@ -796,13 +1561,13 @@ static void ufshcd_gate_work(struct work_struct *work)
796 if (ufshcd_can_hibern8_during_gating(hba)) { 1561 if (ufshcd_can_hibern8_during_gating(hba)) {
797 if (ufshcd_uic_hibern8_enter(hba)) { 1562 if (ufshcd_uic_hibern8_enter(hba)) {
798 hba->clk_gating.state = CLKS_ON; 1563 hba->clk_gating.state = CLKS_ON;
1564 trace_ufshcd_clk_gating(dev_name(hba->dev),
1565 hba->clk_gating.state);
799 goto out; 1566 goto out;
800 } 1567 }
801 ufshcd_set_link_hibern8(hba); 1568 ufshcd_set_link_hibern8(hba);
802 } 1569 }
803 1570
804 ufshcd_suspend_clkscaling(hba);
805
806 if (!ufshcd_is_link_active(hba)) 1571 if (!ufshcd_is_link_active(hba))
807 ufshcd_setup_clocks(hba, false); 1572 ufshcd_setup_clocks(hba, false);
808 else 1573 else
@@ -819,9 +1584,11 @@ static void ufshcd_gate_work(struct work_struct *work)
819 * new requests arriving before the current cancel work is done. 1584 * new requests arriving before the current cancel work is done.
820 */ 1585 */
821 spin_lock_irqsave(hba->host->host_lock, flags); 1586 spin_lock_irqsave(hba->host->host_lock, flags);
822 if (hba->clk_gating.state == REQ_CLKS_OFF) 1587 if (hba->clk_gating.state == REQ_CLKS_OFF) {
823 hba->clk_gating.state = CLKS_OFF; 1588 hba->clk_gating.state = CLKS_OFF;
824 1589 trace_ufshcd_clk_gating(dev_name(hba->dev),
1590 hba->clk_gating.state);
1591 }
825rel_lock: 1592rel_lock:
826 spin_unlock_irqrestore(hba->host->host_lock, flags); 1593 spin_unlock_irqrestore(hba->host->host_lock, flags);
827out: 1594out:
@@ -844,6 +1611,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
844 return; 1611 return;
845 1612
846 hba->clk_gating.state = REQ_CLKS_OFF; 1613 hba->clk_gating.state = REQ_CLKS_OFF;
1614 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
847 schedule_delayed_work(&hba->clk_gating.gate_work, 1615 schedule_delayed_work(&hba->clk_gating.gate_work,
848 msecs_to_jiffies(hba->clk_gating.delay_ms)); 1616 msecs_to_jiffies(hba->clk_gating.delay_ms));
849} 1617}
@@ -881,6 +1649,41 @@ static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
881 return count; 1649 return count;
882} 1650}
883 1651
1652static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1653 struct device_attribute *attr, char *buf)
1654{
1655 struct ufs_hba *hba = dev_get_drvdata(dev);
1656
1657 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1658}
1659
1660static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1661 struct device_attribute *attr, const char *buf, size_t count)
1662{
1663 struct ufs_hba *hba = dev_get_drvdata(dev);
1664 unsigned long flags;
1665 u32 value;
1666
1667 if (kstrtou32(buf, 0, &value))
1668 return -EINVAL;
1669
1670 value = !!value;
1671 if (value == hba->clk_gating.is_enabled)
1672 goto out;
1673
1674 if (value) {
1675 ufshcd_release(hba);
1676 } else {
1677 spin_lock_irqsave(hba->host->host_lock, flags);
1678 hba->clk_gating.active_reqs++;
1679 spin_unlock_irqrestore(hba->host->host_lock, flags);
1680 }
1681
1682 hba->clk_gating.is_enabled = value;
1683out:
1684 return count;
1685}
1686
884static void ufshcd_init_clk_gating(struct ufs_hba *hba) 1687static void ufshcd_init_clk_gating(struct ufs_hba *hba)
885{ 1688{
886 if (!ufshcd_is_clkgating_allowed(hba)) 1689 if (!ufshcd_is_clkgating_allowed(hba))
@@ -890,13 +1693,23 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
890 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); 1693 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
891 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); 1694 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
892 1695
1696 hba->clk_gating.is_enabled = true;
1697
893 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; 1698 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
894 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; 1699 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
895 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); 1700 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
896 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; 1701 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
897 hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR; 1702 hba->clk_gating.delay_attr.attr.mode = 0644;
898 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) 1703 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
899 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); 1704 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1705
1706 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1707 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1708 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1709 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1710 hba->clk_gating.enable_attr.attr.mode = 0644;
1711 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1712 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
900} 1713}
901 1714
902static void ufshcd_exit_clk_gating(struct ufs_hba *hba) 1715static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
@@ -904,6 +1717,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
904 if (!ufshcd_is_clkgating_allowed(hba)) 1717 if (!ufshcd_is_clkgating_allowed(hba))
905 return; 1718 return;
906 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); 1719 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1720 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
907 cancel_work_sync(&hba->clk_gating.ungate_work); 1721 cancel_work_sync(&hba->clk_gating.ungate_work);
908 cancel_delayed_work_sync(&hba->clk_gating.gate_work); 1722 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
909} 1723}
@@ -911,9 +1725,27 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
911/* Must be called with host lock acquired */ 1725/* Must be called with host lock acquired */
912static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) 1726static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
913{ 1727{
914 if (!ufshcd_is_clkscaling_enabled(hba)) 1728 bool queue_resume_work = false;
1729
1730 if (!ufshcd_is_clkscaling_supported(hba))
1731 return;
1732
1733 if (!hba->clk_scaling.active_reqs++)
1734 queue_resume_work = true;
1735
1736 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
915 return; 1737 return;
916 1738
1739 if (queue_resume_work)
1740 queue_work(hba->clk_scaling.workq,
1741 &hba->clk_scaling.resume_work);
1742
1743 if (!hba->clk_scaling.window_start_t) {
1744 hba->clk_scaling.window_start_t = jiffies;
1745 hba->clk_scaling.tot_busy_t = 0;
1746 hba->clk_scaling.is_busy_started = false;
1747 }
1748
917 if (!hba->clk_scaling.is_busy_started) { 1749 if (!hba->clk_scaling.is_busy_started) {
918 hba->clk_scaling.busy_start_t = ktime_get(); 1750 hba->clk_scaling.busy_start_t = ktime_get();
919 hba->clk_scaling.is_busy_started = true; 1751 hba->clk_scaling.is_busy_started = true;
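
The bookkeeping added above feeds devfreq's load estimate: a measurement
window opens with the first request, busy time accumulates while requests
are in flight, and the devfreq get_dev_status callback (visible further down
in this diff) compares busy time against window time. A compilable userspace
model of that accounting, with simplified names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct scaling_stats {
            uint64_t window_start_us;       /* 0 means no window is open */
            uint64_t busy_start_us;
            uint64_t tot_busy_us;
            bool is_busy_started;
    };

    static void start_busy(struct scaling_stats *s, uint64_t now_us)
    {
            if (!s->window_start_us) {      /* open a fresh window */
                    s->window_start_us = now_us;
                    s->tot_busy_us = 0;
                    s->is_busy_started = false;
            }
            if (!s->is_busy_started) {      /* first in-flight request of a burst */
                    s->busy_start_us = now_us;
                    s->is_busy_started = true;
            }
    }

    static void update_busy(struct scaling_stats *s, uint64_t now_us,
                            int outstanding)
    {
            if (!outstanding && s->is_busy_started) {
                    s->tot_busy_us += now_us - s->busy_start_us;
                    s->is_busy_started = false;
            }
    }

    int main(void)
    {
            struct scaling_stats s = { 0 };

            start_busy(&s, 1000);           /* request issued at t = 1000 us */
            update_busy(&s, 1400, 0);       /* queue drained at t = 1400 us */
            printf("busy %llu of %llu us\n",
                   (unsigned long long)s.tot_busy_us,
                   (unsigned long long)(1400 - s.window_start_us));
            return 0;
    }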
@@ -924,7 +1756,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
924{ 1756{
925 struct ufs_clk_scaling *scaling = &hba->clk_scaling; 1757 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
926 1758
927 if (!ufshcd_is_clkscaling_enabled(hba)) 1759 if (!ufshcd_is_clkscaling_supported(hba))
928 return; 1760 return;
929 1761
930 if (!hba->outstanding_reqs && scaling->is_busy_started) { 1762 if (!hba->outstanding_reqs && scaling->is_busy_started) {
@@ -942,11 +1774,13 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
942static inline 1774static inline
943void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) 1775void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
944{ 1776{
1777 hba->lrb[task_tag].issue_time_stamp = ktime_get();
945 ufshcd_clk_scaling_start_busy(hba); 1778 ufshcd_clk_scaling_start_busy(hba);
946 __set_bit(task_tag, &hba->outstanding_reqs); 1779 __set_bit(task_tag, &hba->outstanding_reqs);
947 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); 1780 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
948 /* Make sure that doorbell is committed immediately */ 1781 /* Make sure that doorbell is committed immediately */
949 wmb(); 1782 wmb();
1783 ufshcd_add_command_trace(hba, task_tag, "send");
950} 1784}
951 1785
952/** 1786/**
@@ -1484,6 +2318,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1484 BUG(); 2318 BUG();
1485 } 2319 }
1486 2320
2321 if (!down_read_trylock(&hba->clk_scaling_lock))
2322 return SCSI_MLQUEUE_HOST_BUSY;
2323
1487 spin_lock_irqsave(hba->host->host_lock, flags); 2324 spin_lock_irqsave(hba->host->host_lock, flags);
1488 switch (hba->ufshcd_state) { 2325 switch (hba->ufshcd_state) {
1489 case UFSHCD_STATE_OPERATIONAL: 2326 case UFSHCD_STATE_OPERATIONAL:
@@ -1512,6 +2349,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1512 } 2349 }
1513 spin_unlock_irqrestore(hba->host->host_lock, flags); 2350 spin_unlock_irqrestore(hba->host->host_lock, flags);
1514 2351
2352 hba->req_abort_count = 0;
2353
1515 /* acquire the tag to make sure device cmds don't use it */ 2354 /* acquire the tag to make sure device cmds don't use it */
1516 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { 2355 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
1517 /* 2356 /*
@@ -1541,6 +2380,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1541 lrbp->task_tag = tag; 2380 lrbp->task_tag = tag;
1542 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); 2381 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
1543 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false; 2382 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2383 lrbp->req_abort_skip = false;
1544 2384
1545 ufshcd_comp_scsi_upiu(hba, lrbp); 2385 ufshcd_comp_scsi_upiu(hba, lrbp);
1546 2386
@@ -1560,6 +2400,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1560out_unlock: 2400out_unlock:
1561 spin_unlock_irqrestore(hba->host->host_lock, flags); 2401 spin_unlock_irqrestore(hba->host->host_lock, flags);
1562out: 2402out:
2403 up_read(&hba->clk_scaling_lock);
1563 return err; 2404 return err;
1564} 2405}
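
The new clk_scaling_lock is a plain reader/writer gate: every submission
path takes it for read (a trylock here, so the midlayer can simply requeue
on SCSI_MLQUEUE_HOST_BUSY; a blocking down_read in the device-command path
below), while the scaling code, not visible in this hunk, is expected to
take it for write in order to drain traffic around a frequency change. A
userspace model of the same gate using POSIX rwlocks:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t scaling_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* submission path: bail out and let the caller requeue if scaling owns the gate */
    static int queue_one_command(void)
    {
            if (pthread_rwlock_tryrdlock(&scaling_lock))
                    return -1;      /* models returning SCSI_MLQUEUE_HOST_BUSY */
            /* ... build and issue the request ... */
            pthread_rwlock_unlock(&scaling_lock);
            return 0;
    }

    /* scaling path: the writer excludes all submitters while rates change */
    static void scale_clocks(void)
    {
            pthread_rwlock_wrlock(&scaling_lock);
            /* ... change clock rates, renegotiate the link gear ... */
            pthread_rwlock_unlock(&scaling_lock);
    }

    int main(void)
    {
            scale_clocks();
            printf("queue_one_command: %d\n", queue_one_command());
            return 0;
    }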
1565 2406
@@ -1622,6 +2463,7 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1622 int resp; 2463 int resp;
1623 int err = 0; 2464 int err = 0;
1624 2465
2466 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
1625 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); 2467 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1626 2468
1627 switch (resp) { 2469 switch (resp) {
@@ -1748,6 +2590,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1748 struct completion wait; 2590 struct completion wait;
1749 unsigned long flags; 2591 unsigned long flags;
1750 2592
2593 down_read(&hba->clk_scaling_lock);
2594
1751 /* 2595 /*
1752 * Get free slot, sleep if slots are unavailable. 2596 * Get free slot, sleep if slots are unavailable.
1753 * Even though we use wait_event() which sleeps indefinitely, 2597 * Even though we use wait_event() which sleeps indefinitely,
@@ -1776,6 +2620,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1776out_put_tag: 2620out_put_tag:
1777 ufshcd_put_dev_cmd_tag(hba, tag); 2621 ufshcd_put_dev_cmd_tag(hba, tag);
1778 wake_up(&hba->dev_cmd.tag_wq); 2622 wake_up(&hba->dev_cmd.tag_wq);
2623 up_read(&hba->clk_scaling_lock);
1779 return err; 2624 return err;
1780} 2625}
1781 2626
@@ -2073,9 +2918,11 @@ out:
2073 * The buf_len parameter will contain, on return, the length parameter 2918 * The buf_len parameter will contain, on return, the length parameter
2074 * received on the response. 2919 * received on the response.
2075 */ 2920 */
2076int ufshcd_query_descriptor_retry(struct ufs_hba *hba, 2921static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2077 enum query_opcode opcode, enum desc_idn idn, u8 index, 2922 enum query_opcode opcode,
2078 u8 selector, u8 *desc_buf, int *buf_len) 2923 enum desc_idn idn, u8 index,
2924 u8 selector,
2925 u8 *desc_buf, int *buf_len)
2079{ 2926{
2080 int err; 2927 int err;
2081 int retries; 2928 int retries;
@@ -2089,7 +2936,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2089 2936
2090 return err; 2937 return err;
2091} 2938}
2092EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
2093 2939
2094/** 2940/**
2095 * ufshcd_read_desc_param - read the specified descriptor parameter 2941 * ufshcd_read_desc_param - read the specified descriptor parameter
@@ -2207,11 +3053,10 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
2207 return err; 3053 return err;
2208} 3054}
2209 3055
2210int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) 3056static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
2211{ 3057{
2212 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); 3058 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
2213} 3059}
2214EXPORT_SYMBOL(ufshcd_read_device_desc);
2215 3060
2216/** 3061/**
2217 * ufshcd_read_string_desc - read string descriptor 3062 * ufshcd_read_string_desc - read string descriptor
@@ -2223,8 +3068,9 @@ EXPORT_SYMBOL(ufshcd_read_device_desc);
2223 * 3068 *
2224 * Return 0 in case of success, non-zero otherwise 3069 * Return 0 in case of success, non-zero otherwise
2225 */ 3070 */
2226int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, 3071#define ASCII_STD true
2227 u32 size, bool ascii) 3072static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3073 u8 *buf, u32 size, bool ascii)
2228{ 3074{
2229 int err = 0; 3075 int err = 0;
2230 3076
@@ -2280,7 +3126,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
2280out: 3126out:
2281 return err; 3127 return err;
2282} 3128}
2283EXPORT_SYMBOL(ufshcd_read_string_desc);
2284 3129
2285/** 3130/**
2286 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter 3131 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
@@ -2453,12 +3298,19 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
2453 } 3298 }
2454 3299
2455 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); 3300 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3301 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3302 (i * sizeof(struct utp_transfer_req_desc));
2456 hba->lrb[i].ucd_req_ptr = 3303 hba->lrb[i].ucd_req_ptr =
2457 (struct utp_upiu_req *)(cmd_descp + i); 3304 (struct utp_upiu_req *)(cmd_descp + i);
3305 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
2458 hba->lrb[i].ucd_rsp_ptr = 3306 hba->lrb[i].ucd_rsp_ptr =
2459 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; 3307 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3308 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3309 response_offset;
2460 hba->lrb[i].ucd_prdt_ptr = 3310 hba->lrb[i].ucd_prdt_ptr =
2461 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table; 3311 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3312 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3313 prdt_offset;
2462 } 3314 }
2463} 3315}
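
The new *_dma_addr fields mirror, in bus-address space, the pointer
arithmetic already done for the CPU-side pointers: every slot i sits at a
fixed stride from the command-descriptor base, and the response UPIU and PRD
table sit at fixed offsets inside the slot (response_offset and prdt_offset
are computed earlier in this function). Schematically, with illustrative
sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define SLOT_SIZE   1024u   /* illustrative: sizeof one command descriptor */
    #define RSP_OFFSET   256u   /* illustrative: offset of the response UPIU */
    #define PRDT_OFFSET  512u   /* illustrative: offset of the PRD table */

    int main(void)
    {
            uint64_t cmd_desc_base = 0x80000000u;   /* DMA handle from the allocator */

            for (unsigned int i = 0; i < 4; i++) {
                    uint64_t slot = cmd_desc_base + (uint64_t)i * SLOT_SIZE;

                    printf("tag %u: req=%#llx rsp=%#llx prdt=%#llx\n", i,
                           (unsigned long long)slot,
                           (unsigned long long)(slot + RSP_OFFSET),
                           (unsigned long long)(slot + PRDT_OFFSET));
            }
            return 0;
    }

Keeping the bus addresses alongside the kernel virtual pointers lets the new
debug helpers dump descriptors on the error path without recomputing
offsets; the ufshcd.h hunk at the end of this diff documents the fields as
being for debug.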
2464 3316
@@ -2482,7 +3334,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
2482 3334
2483 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 3335 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
2484 if (ret) 3336 if (ret)
2485 dev_err(hba->dev, 3337 dev_dbg(hba->dev,
2486 "dme-link-startup: error code %d\n", ret); 3338 "dme-link-startup: error code %d\n", ret);
2487 return ret; 3339 return ret;
2488} 3340}
@@ -2702,6 +3554,12 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2702 ret = (status != PWR_OK) ? status : -1; 3554 ret = (status != PWR_OK) ? status : -1;
2703 } 3555 }
2704out: 3556out:
3557 if (ret) {
3558 ufshcd_print_host_state(hba);
3559 ufshcd_print_pwr_info(hba);
3560 ufshcd_print_host_regs(hba);
3561 }
3562
2705 spin_lock_irqsave(hba->host->host_lock, flags); 3563 spin_lock_irqsave(hba->host->host_lock, flags);
2706 hba->active_uic_cmd = NULL; 3564 hba->active_uic_cmd = NULL;
2707 hba->uic_async_done = NULL; 3565 hba->uic_async_done = NULL;
@@ -2776,11 +3634,14 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2776{ 3634{
2777 int ret; 3635 int ret;
2778 struct uic_command uic_cmd = {0}; 3636 struct uic_command uic_cmd = {0};
3637 ktime_t start = ktime_get();
2779 3638
2780 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); 3639 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
2781 3640
2782 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; 3641 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
2783 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 3642 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3643 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3644 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
2784 3645
2785 if (ret) { 3646 if (ret) {
2786 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", 3647 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
@@ -2816,18 +3677,25 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2816{ 3677{
2817 struct uic_command uic_cmd = {0}; 3678 struct uic_command uic_cmd = {0};
2818 int ret; 3679 int ret;
3680 ktime_t start = ktime_get();
2819 3681
2820 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); 3682 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
2821 3683
2822 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; 3684 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2823 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); 3685 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3686 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3687 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3688
2824 if (ret) { 3689 if (ret) {
2825 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", 3690 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
2826 __func__, ret); 3691 __func__, ret);
2827 ret = ufshcd_link_recovery(hba); 3692 ret = ufshcd_link_recovery(hba);
2828 } else 3693 } else {
2829 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, 3694 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
2830 POST_CHANGE); 3695 POST_CHANGE);
3696 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3697 hba->ufs_stats.hibern8_exit_cnt++;
3698 }
2831 3699
2832 return ret; 3700 return ret;
2833} 3701}
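
The tracepoints introduced in this series (hibern8 enter/exit here, plus
clock gating, init, and the suspend/resume paths below) share one timing
idiom: capture ktime_get() before the operation, then report
ktime_to_us(ktime_sub(ktime_get(), start)) together with the return code. A
standalone approximation of the idiom, with illustrative names:

    #include <stdio.h>
    #include <time.h>

    static long long now_us(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);    /* monotonic, like ktime_get() */
            return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
    }

    static int dummy_op(void)
    {
            return 0;
    }

    static int profiled_call(const char *what, int (*op)(void))
    {
            long long start = now_us();
            int ret = op();

            /* stands in for trace_ufshcd_profile_hibern8(dev, what, us, ret) */
            fprintf(stderr, "%s: %lld us, ret %d\n", what, now_us() - start, ret);
            return ret;
    }

    int main(void)
    {
            return profiled_call("hibern8-enter", dummy_op);
    }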
@@ -2994,6 +3862,8 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
2994 memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); 3862 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
2995 3863
2996 ret = ufshcd_change_power_mode(hba, &final_params); 3864 ret = ufshcd_change_power_mode(hba, &final_params);
3865 if (!ret)
3866 ufshcd_print_pwr_info(hba);
2997 3867
2998 return ret; 3868 return ret;
2999} 3869}
@@ -3265,6 +4135,10 @@ link_startup:
3265 goto link_startup; 4135 goto link_startup;
3266 } 4136 }
3267 4137
4138 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4139 ufshcd_init_pwr_info(hba);
4140 ufshcd_print_pwr_info(hba);
4141
3268 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { 4142 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
3269 ret = ufshcd_disable_device_tx_lcc(hba); 4143 ret = ufshcd_disable_device_tx_lcc(hba);
3270 if (ret) 4144 if (ret)
@@ -3278,8 +4152,12 @@ link_startup:
3278 4152
3279 ret = ufshcd_make_hba_operational(hba); 4153 ret = ufshcd_make_hba_operational(hba);
3280out: 4154out:
3281 if (ret) 4155 if (ret) {
3282 dev_err(hba->dev, "link startup failed %d\n", ret); 4156 dev_err(hba->dev, "link startup failed %d\n", ret);
4157 ufshcd_print_host_state(hba);
4158 ufshcd_print_pwr_info(hba);
4159 ufshcd_print_host_regs(hba);
4160 }
3283 return ret; 4161 return ret;
3284} 4162}
3285 4163
@@ -3591,7 +4469,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3591 switch (ocs) { 4469 switch (ocs) {
3592 case OCS_SUCCESS: 4470 case OCS_SUCCESS:
3593 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); 4471 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3594 4472 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3595 switch (result) { 4473 switch (result) {
3596 case UPIU_TRANSACTION_RESPONSE: 4474 case UPIU_TRANSACTION_RESPONSE:
3597 /* 4475 /*
@@ -3652,10 +4530,15 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3652 default: 4530 default:
3653 result |= DID_ERROR << 16; 4531 result |= DID_ERROR << 16;
3654 dev_err(hba->dev, 4532 dev_err(hba->dev,
3655 "OCS error from controller = %x\n", ocs); 4533 "OCS error from controller = %x for tag %d\n",
4534 ocs, lrbp->task_tag);
4535 ufshcd_print_host_regs(hba);
4536 ufshcd_print_host_state(hba);
3656 break; 4537 break;
3657 } /* end of switch */ 4538 } /* end of switch */
3658 4539
4540 if (host_byte(result) != DID_OK)
4541 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
3659 return result; 4542 return result;
3660} 4543}
3661 4544
@@ -3695,6 +4578,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3695 lrbp = &hba->lrb[index]; 4578 lrbp = &hba->lrb[index];
3696 cmd = lrbp->cmd; 4579 cmd = lrbp->cmd;
3697 if (cmd) { 4580 if (cmd) {
4581 ufshcd_add_command_trace(hba, index, "complete");
3698 result = ufshcd_transfer_rsp_status(hba, lrbp); 4582 result = ufshcd_transfer_rsp_status(hba, lrbp);
3699 scsi_dma_unmap(cmd); 4583 scsi_dma_unmap(cmd);
3700 cmd->result = result; 4584 cmd->result = result;
@@ -3706,9 +4590,16 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
3706 __ufshcd_release(hba); 4590 __ufshcd_release(hba);
3707 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || 4591 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
3708 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { 4592 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
3709 if (hba->dev_cmd.complete) 4593 if (hba->dev_cmd.complete) {
4594 ufshcd_add_command_trace(hba, index,
4595 "dev_complete");
3710 complete(hba->dev_cmd.complete); 4596 complete(hba->dev_cmd.complete);
4597 }
3711 } 4598 }
4599 if (ufshcd_is_clkscaling_supported(hba))
4600 hba->clk_scaling.active_reqs--;
3712 } 4603 }
3713 4604
3714 /* clear corresponding bits of completed commands */ 4605 /* clear corresponding bits of completed commands */
@@ -3828,6 +4719,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
3828 } 4719 }
3829 4720
3830 hba->auto_bkops_enabled = true; 4721 hba->auto_bkops_enabled = true;
4722 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
3831 4723
3832 /* No need of URGENT_BKOPS exception from the device */ 4724 /* No need of URGENT_BKOPS exception from the device */
3833 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); 4725 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -3878,23 +4770,31 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
3878 } 4770 }
3879 4771
3880 hba->auto_bkops_enabled = false; 4772 hba->auto_bkops_enabled = false;
4773 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
3881out: 4774out:
3882 return err; 4775 return err;
3883} 4776}
3884 4777
3885/** 4778/**
3886 * ufshcd_force_reset_auto_bkops - force enable of auto bkops 4779 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
3887 * @hba: per adapter instance 4780 * @hba: per adapter instance
3888 * 4781 *
3889 * After a device reset the device may toggle the BKOPS_EN flag 4782 * After a device reset the device may toggle the BKOPS_EN flag
3890 * to default value. The s/w tracking variables should be updated 4783 * to default value. The s/w tracking variables should be updated
3891 * as well. Do this by forcing enable of auto bkops. 4784 * as well. This function would change the auto-bkops state based on
4785 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
3892 */ 4786 */
3893static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) 4787static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
3894{ 4788{
3895 hba->auto_bkops_enabled = false; 4789 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
3896 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; 4790 hba->auto_bkops_enabled = false;
3897 ufshcd_enable_auto_bkops(hba); 4791 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
4792 ufshcd_enable_auto_bkops(hba);
4793 } else {
4794 hba->auto_bkops_enabled = true;
4795 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
4796 ufshcd_disable_auto_bkops(hba);
4797 }
3898} 4798}
3899 4799
3900static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) 4800static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -4246,6 +5146,14 @@ out:
4246 pm_runtime_put_sync(hba->dev); 5146 pm_runtime_put_sync(hba->dev);
4247} 5147}
4248 5148
5149static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5150 u32 reg)
5151{
5152 reg_hist->reg[reg_hist->pos] = reg;
5153 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5154 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5155}
5156
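ufshcd_update_uic_reg_hist() above is a small fixed-size ring: each raw
error value and its timestamp land at pos, and pos wraps modulo the history
length, so the buffer always holds the most recent UIC_ERR_REG_HIST_LENGTH
events. The same structure in standalone C (the length value is assumed for
illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define HIST_LEN 8      /* stands in for UIC_ERR_REG_HIST_LENGTH */

    struct err_reg_hist {
            uint32_t reg[HIST_LEN];
            int64_t tstamp[HIST_LEN];
            int pos;        /* next slot to overwrite */
    };

    static void hist_push(struct err_reg_hist *h, uint32_t reg, int64_t now)
    {
            h->reg[h->pos] = reg;
            h->tstamp[h->pos] = now;
            h->pos = (h->pos + 1) % HIST_LEN;   /* oldest entry goes first */
    }

    int main(void)
    {
            struct err_reg_hist h = { 0 };

            for (uint32_t i = 0; i < 10; i++)   /* 10 pushes overwrite entries 0 and 1 */
                    hist_push(&h, 0x1000 + i, (int64_t)i);
            printf("oldest surviving entry: %#x\n", (unsigned int)h.reg[h.pos]);
            return 0;
    }

Ring buffers like this are the usual compromise for error forensics in a hot
path: constant memory, constant time, and a dump helper can replay the last
few events in order starting from pos.
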
4249/** 5157/**
4250 * ufshcd_update_uic_error - check and set fatal UIC error flags. 5158 * ufshcd_update_uic_error - check and set fatal UIC error flags.
4251 * @hba: per-adapter instance 5159 * @hba: per-adapter instance
@@ -4258,15 +5166,20 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
4258 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); 5166 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
4259 /* Ignore LINERESET indication, as this is not an error */ 5167 /* Ignore LINERESET indication, as this is not an error */
4260 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && 5168 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
4261 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) 5169 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
4262 /* 5170 /*
4263 * To know whether this error is fatal or not, DB timeout 5171 * To know whether this error is fatal or not, DB timeout
4264 * must be checked but this error is handled separately. 5172 * must be checked but this error is handled separately.
4265 */ 5173 */
4266 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__); 5174 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5175 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5176 }
4267 5177
4268 /* PA_INIT_ERROR is fatal and needs UIC reset */ 5178 /* PA_INIT_ERROR is fatal and needs UIC reset */
4269 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); 5179 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5180 if (reg)
5181 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5182
4270 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) 5183 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
4271 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; 5184 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
4272 else if (hba->dev_quirks & 5185 else if (hba->dev_quirks &
@@ -4280,16 +5193,22 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
4280 5193
4281 /* UIC NL/TL/DME errors need software retry */ 5194 /* UIC NL/TL/DME errors need software retry */
4282 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); 5195 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
4283 if (reg) 5196 if (reg) {
5197 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
4284 hba->uic_error |= UFSHCD_UIC_NL_ERROR; 5198 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5199 }
4285 5200
4286 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); 5201 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
4287 if (reg) 5202 if (reg) {
5203 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
4288 hba->uic_error |= UFSHCD_UIC_TL_ERROR; 5204 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5205 }
4289 5206
4290 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); 5207 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
4291 if (reg) 5208 if (reg) {
5209 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
4292 hba->uic_error |= UFSHCD_UIC_DME_ERROR; 5210 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5211 }
4293 5212
4294 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", 5213 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
4295 __func__, hba->uic_error); 5214 __func__, hba->uic_error);
@@ -4327,6 +5246,22 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
4327 scsi_block_requests(hba->host); 5246 scsi_block_requests(hba->host);
4328 5247
4329 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED; 5248 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5249
5250 /* dump controller state before resetting */
5251 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5252 bool pr_prdt = !!(hba->saved_err &
5253 SYSTEM_BUS_FATAL_ERROR);
5254
5255 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5256 __func__, hba->saved_err,
5257 hba->saved_uic_err);
5258
5259 ufshcd_print_host_regs(hba);
5260 ufshcd_print_pwr_info(hba);
5261 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5262 ufshcd_print_trs(hba, hba->outstanding_reqs,
5263 pr_prdt);
5264 }
4330 schedule_work(&hba->eh_work); 5265 schedule_work(&hba->eh_work);
4331 } 5266 }
4332 } 5267 }
@@ -4557,7 +5492,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
4557 spin_lock_irqsave(host->host_lock, flags); 5492 spin_lock_irqsave(host->host_lock, flags);
4558 ufshcd_transfer_req_compl(hba); 5493 ufshcd_transfer_req_compl(hba);
4559 spin_unlock_irqrestore(host->host_lock, flags); 5494 spin_unlock_irqrestore(host->host_lock, flags);
5495
4560out: 5496out:
5497 hba->req_abort_count = 0;
4561 if (!err) { 5498 if (!err) {
4562 err = SUCCESS; 5499 err = SUCCESS;
4563 } else { 5500 } else {
@@ -4567,6 +5504,17 @@ out:
4567 return err; 5504 return err;
4568} 5505}
4569 5506
5507static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5508{
5509 struct ufshcd_lrb *lrbp;
5510 int tag;
5511
5512 for_each_set_bit(tag, &bitmap, hba->nutrs) {
5513 lrbp = &hba->lrb[tag];
5514 lrbp->req_abort_skip = true;
5515 }
5516}
5517
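ufshcd_set_req_abort_skip() above walks the outstanding-request bitmap and
flags every in-flight tag, so that once one abort has failed, later aborts
in the same burst fail fast instead of re-issuing task-management commands
that are unlikely to succeed. for_each_set_bit() is just an iterator over
set bit positions; open-coded in plain C:

    #include <stdio.h>

    int main(void)
    {
            unsigned long outstanding = 0x29;   /* tags 0, 3 and 5 in flight */
            int nutrs = 32;                     /* queue depth, as hba->nutrs */
            int skip[32] = { 0 };

            for (int tag = 0; tag < nutrs; tag++)   /* open-coded for_each_set_bit() */
                    if (outstanding & (1UL << tag))
                            skip[tag] = 1;          /* models lrbp->req_abort_skip = true */

            for (int tag = 0; tag < nutrs; tag++)
                    if (skip[tag])
                            printf("tag %d will skip task abort\n", tag);
            return 0;
    }
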
4570/** 5518/**
4571 * ufshcd_abort - abort a specific command 5519 * ufshcd_abort - abort a specific command
4572 * @cmd: SCSI command pointer 5520 * @cmd: SCSI command pointer
@@ -4594,6 +5542,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
4594 host = cmd->device->host; 5542 host = cmd->device->host;
4595 hba = shost_priv(host); 5543 hba = shost_priv(host);
4596 tag = cmd->request->tag; 5544 tag = cmd->request->tag;
5545 lrbp = &hba->lrb[tag];
4597 if (!ufshcd_valid_tag(hba, tag)) { 5546 if (!ufshcd_valid_tag(hba, tag)) {
4598 dev_err(hba->dev, 5547 dev_err(hba->dev,
4599 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", 5548 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
@@ -4601,6 +5550,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
4601 BUG(); 5550 BUG();
4602 } 5551 }
4603 5552
5553 /*
5554 * Task abort to the device W-LUN is illegal. Since this command
5555 * will fail due to spec violation, the next step of SCSI error
5556 * handling would be a LU reset which, again, is a spec violation.
5557 * To avoid these unnecessary/illegal steps we skip to the last error
5558 * handling stage: reset and restore.
5559 */
5560 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
5561 return ufshcd_eh_host_reset_handler(cmd);
5562
4604 ufshcd_hold(hba, false); 5563 ufshcd_hold(hba, false);
4605 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 5564 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4606 /* If command is already aborted/completed, return SUCCESS */ 5565 /* If command is already aborted/completed, return SUCCESS */
@@ -4617,18 +5576,48 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
4617 __func__, tag); 5576 __func__, tag);
4618 } 5577 }
4619 5578
4620 lrbp = &hba->lrb[tag]; 5579 /* Print Transfer Request of aborted task */
5580 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
5581
5582 /*
5583 * Print detailed info about aborted request.
5584 * As more than one request might get aborted at the same time,
5585 * print full information only for the first aborted request in order
5586 * to reduce repeated printouts. For other aborted requests only print
5587 * basic details.
5588 */
5589 scsi_print_command(hba->lrb[tag].cmd);
5590 if (!hba->req_abort_count) {
5591 ufshcd_print_host_regs(hba);
5592 ufshcd_print_host_state(hba);
5593 ufshcd_print_pwr_info(hba);
5594 ufshcd_print_trs(hba, 1 << tag, true);
5595 } else {
5596 ufshcd_print_trs(hba, 1 << tag, false);
5597 }
5598 hba->req_abort_count++;
5599
5600 /* Skip task abort in case previous aborts failed and report failure */
5601 if (lrbp->req_abort_skip) {
5602 err = -EIO;
5603 goto out;
5604 }
5605
4621 for (poll_cnt = 100; poll_cnt; poll_cnt--) { 5606 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
4622 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, 5607 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4623 UFS_QUERY_TASK, &resp); 5608 UFS_QUERY_TASK, &resp);
4624 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { 5609 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
4625 /* cmd pending in the device */ 5610 /* cmd pending in the device */
5611 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
5612 __func__, tag);
4626 break; 5613 break;
4627 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 5614 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
4628 /* 5615 /*
4629 * cmd not pending in the device, check if it is 5616 * cmd not pending in the device, check if it is
4630 * in transition. 5617 * in transition.
4631 */ 5618 */
5619 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
5620 __func__, tag);
4632 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 5621 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4633 if (reg & (1 << tag)) { 5622 if (reg & (1 << tag)) {
4634 /* sleep for max. 200us to stabilize */ 5623 /* sleep for max. 200us to stabilize */
@@ -4636,8 +5625,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
4636 continue; 5625 continue;
4637 } 5626 }
4638 /* command completed already */ 5627 /* command completed already */
5628 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
5629 __func__, tag);
4639 goto out; 5630 goto out;
4640 } else { 5631 } else {
5632 dev_err(hba->dev,
5633 "%s: no response from device. tag = %d, err %d\n",
5634 __func__, tag, err);
4641 if (!err) 5635 if (!err)
4642 err = resp; /* service response error */ 5636 err = resp; /* service response error */
4643 goto out; 5637 goto out;
@@ -4652,14 +5646,20 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
4652 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, 5646 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
4653 UFS_ABORT_TASK, &resp); 5647 UFS_ABORT_TASK, &resp);
4654 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { 5648 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
4655 if (!err) 5649 if (!err) {
4656 err = resp; /* service response error */ 5650 err = resp; /* service response error */
5651 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
5652 __func__, tag, err);
5653 }
4657 goto out; 5654 goto out;
4658 } 5655 }
4659 5656
4660 err = ufshcd_clear_cmd(hba, tag); 5657 err = ufshcd_clear_cmd(hba, tag);
4661 if (err) 5658 if (err) {
5659 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
5660 __func__, tag, err);
4662 goto out; 5661 goto out;
5662 }
4663 5663
4664 scsi_dma_unmap(cmd); 5664 scsi_dma_unmap(cmd);
4665 5665
@@ -4676,6 +5676,7 @@ out:
4676 err = SUCCESS; 5676 err = SUCCESS;
4677 } else { 5677 } else {
4678 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); 5678 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5679 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
4679 err = FAILED; 5680 err = FAILED;
4680 } 5681 }
4681 5682
@@ -4707,6 +5708,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
4707 ufshcd_hba_stop(hba, false); 5708 ufshcd_hba_stop(hba, false);
4708 spin_unlock_irqrestore(hba->host->host_lock, flags); 5709 spin_unlock_irqrestore(hba->host->host_lock, flags);
4709 5710
5711 /* scale up clocks to max frequency before full reinitialization */
5712 ufshcd_scale_clks(hba, true);
5713
4710 err = ufshcd_hba_enable(hba); 5714 err = ufshcd_hba_enable(hba);
4711 if (err) 5715 if (err)
4712 goto out; 5716 goto out;
@@ -4822,7 +5826,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
4822 u16 unit; 5826 u16 unit;
4823 5827
4824 for (i = start_scan; i >= 0; i--) { 5828 for (i = start_scan; i >= 0; i--) {
4825 data = be16_to_cpu(*((u16 *)(buff + 2*i))); 5829 data = be16_to_cpup((__be16 *)&buff[2 * i]);
4826 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> 5830 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
4827 ATTR_ICC_LVL_UNIT_OFFSET; 5831 ATTR_ICC_LVL_UNIT_OFFSET;
4828 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; 5832 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
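
The one-line icc-level change above replaces an open-coded u16 cast and
dereference with be16_to_cpup() on a properly typed __be16 pointer. The
power descriptor stores big-endian 16-bit fields at byte granularity, so the
old form tripped sparse's endianness checking and relied on an aligned load;
the semantics are unchanged. The equivalent portable extraction in
standalone C:

    #include <stdint.h>
    #include <stdio.h>

    /* read a big-endian u16 at any byte offset, no alignment assumptions */
    static uint16_t get_be16(const uint8_t *p)
    {
            return (uint16_t)((p[0] << 8) | p[1]);
    }

    int main(void)
    {
            const uint8_t buff[] = { 0x00, 0x12, 0x34, 0x56 }; /* descriptor bytes */
            unsigned int i = 1;

            printf("entry %u = %#x\n", i, get_be16(&buff[2 * i])); /* 0x3456 */
            return 0;
    }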
@@ -5008,8 +6012,8 @@ out:
5008 return ret; 6012 return ret;
5009} 6013}
5010 6014
5011static int ufs_get_device_info(struct ufs_hba *hba, 6015static int ufs_get_device_desc(struct ufs_hba *hba,
5012 struct ufs_device_info *card_data) 6016 struct ufs_dev_desc *dev_desc)
5013{ 6017{
5014 int err; 6018 int err;
5015 u8 model_index; 6019 u8 model_index;
@@ -5028,7 +6032,7 @@ static int ufs_get_device_info(struct ufs_hba *hba,
5028 * getting vendor (manufacturerID) and Bank Index in big endian 6032 * getting vendor (manufacturerID) and Bank Index in big endian
5029 * format 6033 * format
5030 */ 6034 */
5031 card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | 6035 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
5032 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; 6036 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
5033 6037
5034 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; 6038 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
@@ -5042,36 +6046,26 @@ static int ufs_get_device_info(struct ufs_hba *hba,
5042 } 6046 }
5043 6047
5044 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0'; 6048 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
5045 strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), 6049 strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
5046 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], 6050 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
5047 MAX_MODEL_LEN)); 6051 MAX_MODEL_LEN));
5048 6052
5049 /* Null terminate the model string */ 6053 /* Null terminate the model string */
5050 card_data->model[MAX_MODEL_LEN] = '\0'; 6054 dev_desc->model[MAX_MODEL_LEN] = '\0';
5051 6055
5052out: 6056out:
5053 return err; 6057 return err;
5054} 6058}
5055 6059
5056void ufs_advertise_fixup_device(struct ufs_hba *hba) 6060static void ufs_fixup_device_setup(struct ufs_hba *hba,
6061 struct ufs_dev_desc *dev_desc)
5057{ 6062{
5058 int err;
5059 struct ufs_dev_fix *f; 6063 struct ufs_dev_fix *f;
5060 struct ufs_device_info card_data;
5061
5062 card_data.wmanufacturerid = 0;
5063
5064 err = ufs_get_device_info(hba, &card_data);
5065 if (err) {
5066 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
5067 __func__, err);
5068 return;
5069 }
5070 6064
5071 for (f = ufs_fixups; f->quirk; f++) { 6065 for (f = ufs_fixups; f->quirk; f++) {
5072 if (((f->card.wmanufacturerid == card_data.wmanufacturerid) || 6066 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
5073 (f->card.wmanufacturerid == UFS_ANY_VENDOR)) && 6067 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
5074 (STR_PRFX_EQUAL(f->card.model, card_data.model) || 6068 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
5075 !strcmp(f->card.model, UFS_ANY_MODEL))) 6069 !strcmp(f->card.model, UFS_ANY_MODEL)))
5076 hba->dev_quirks |= f->quirk; 6070 hba->dev_quirks |= f->quirk;
5077 } 6071 }
@@ -5241,6 +6235,22 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
5241 ufshcd_vops_apply_dev_quirks(hba); 6235 ufshcd_vops_apply_dev_quirks(hba);
5242} 6236}
5243 6237
6238static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6239{
6240 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6241
6242 hba->ufs_stats.hibern8_exit_cnt = 0;
6243 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6244
6245 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6246 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6247 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6248 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6249 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6250
6251 hba->req_abort_count = 0;
6252}
6253
5244/** 6254/**
5245 * ufshcd_probe_hba - probe hba to detect device and initialize 6255 * ufshcd_probe_hba - probe hba to detect device and initialize
5246 * @hba: per-adapter instance 6256 * @hba: per-adapter instance
@@ -5249,18 +6259,21 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
5249 */ 6259 */
5250static int ufshcd_probe_hba(struct ufs_hba *hba) 6260static int ufshcd_probe_hba(struct ufs_hba *hba)
5251{ 6261{
6262 struct ufs_dev_desc card = {0};
5252 int ret; 6263 int ret;
6264 ktime_t start = ktime_get();
5253 6265
5254 ret = ufshcd_link_startup(hba); 6266 ret = ufshcd_link_startup(hba);
5255 if (ret) 6267 if (ret)
5256 goto out; 6268 goto out;
5257 6269
5258 ufshcd_init_pwr_info(hba);
5259
5260 /* set the default level for urgent bkops */ 6270 /* set the default level for urgent bkops */
5261 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; 6271 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5262 hba->is_urgent_bkops_lvl_checked = false; 6272 hba->is_urgent_bkops_lvl_checked = false;
5263 6273
6274 /* Debug counters initialization */
6275 ufshcd_clear_dbg_ufs_stats(hba);
6276
5264 /* UniPro link is active now */ 6277 /* UniPro link is active now */
5265 ufshcd_set_link_active(hba); 6278 ufshcd_set_link_active(hba);
5266 6279
@@ -5272,7 +6285,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
5272 if (ret) 6285 if (ret)
5273 goto out; 6286 goto out;
5274 6287
5275 ufs_advertise_fixup_device(hba); 6288 ret = ufs_get_device_desc(hba, &card);
6289 if (ret) {
6290 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6291 __func__, ret);
6292 goto out;
6293 }
6294
6295 ufs_fixup_device_setup(hba, &card);
5276 ufshcd_tune_unipro_params(hba); 6296 ufshcd_tune_unipro_params(hba);
5277 6297
5278 ret = ufshcd_set_vccq_rail_unused(hba, 6298 ret = ufshcd_set_vccq_rail_unused(hba,
@@ -5320,6 +6340,27 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
5320 if (ufshcd_scsi_add_wlus(hba)) 6340 if (ufshcd_scsi_add_wlus(hba))
5321 goto out; 6341 goto out;
5322 6342
6343 /* Initialize devfreq after UFS device is detected */
6344 if (ufshcd_is_clkscaling_supported(hba)) {
6345 memcpy(&hba->clk_scaling.saved_pwr_info.info,
6346 &hba->pwr_info,
6347 sizeof(struct ufs_pa_layer_attr));
6348 hba->clk_scaling.saved_pwr_info.is_valid = true;
6349 if (!hba->devfreq) {
6350 hba->devfreq = devm_devfreq_add_device(hba->dev,
6351 &ufs_devfreq_profile,
6352 "simple_ondemand",
6353 NULL);
6354 if (IS_ERR(hba->devfreq)) {
6355 ret = PTR_ERR(hba->devfreq);
6356 dev_err(hba->dev, "Unable to register with devfreq %d\n",
6357 ret);
6358 goto out;
6359 }
6360 }
6361 hba->clk_scaling.is_allowed = true;
6362 }
6363
5323 scsi_scan_host(hba->host); 6364 scsi_scan_host(hba->host);
5324 pm_runtime_put_sync(hba->dev); 6365 pm_runtime_put_sync(hba->dev);
5325 } 6366 }
@@ -5327,9 +6368,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
5327 if (!hba->is_init_prefetch) 6368 if (!hba->is_init_prefetch)
5328 hba->is_init_prefetch = true; 6369 hba->is_init_prefetch = true;
5329 6370
5330 /* Resume devfreq after UFS device is detected */
5331 ufshcd_resume_clkscaling(hba);
5332
5333out: 6371out:
5334 /* 6372 /*
5335 * If we failed to initialize the device or the device is not 6373 * If we failed to initialize the device or the device is not
@@ -5340,6 +6378,9 @@ out:
5340 ufshcd_hba_exit(hba); 6378 ufshcd_hba_exit(hba);
5341 } 6379 }
5342 6380
6381 trace_ufshcd_init(dev_name(hba->dev), ret,
6382 ktime_to_us(ktime_sub(ktime_get(), start)),
6383 hba->curr_dev_pwr_mode, hba->uic_link_state);
5343 return ret; 6384 return ret;
5344} 6385}
5345 6386
@@ -5650,6 +6691,8 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
5650 struct ufs_clk_info *clki; 6691 struct ufs_clk_info *clki;
5651 struct list_head *head = &hba->clk_list_head; 6692 struct list_head *head = &hba->clk_list_head;
5652 unsigned long flags; 6693 unsigned long flags;
6694 ktime_t start = ktime_get();
6695 bool clk_state_changed = false;
5653 6696
5654 if (!head || list_empty(head)) 6697 if (!head || list_empty(head))
5655 goto out; 6698 goto out;
@@ -5663,6 +6706,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
5663 if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) 6706 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
5664 continue; 6707 continue;
5665 6708
6709 clk_state_changed = on ^ clki->enabled;
5666 if (on && !clki->enabled) { 6710 if (on && !clki->enabled) {
5667 ret = clk_prepare_enable(clki->clk); 6711 ret = clk_prepare_enable(clki->clk);
5668 if (ret) { 6712 if (ret) {
@@ -5689,11 +6733,18 @@ out:
5689 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) 6733 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
5690 clk_disable_unprepare(clki->clk); 6734 clk_disable_unprepare(clki->clk);
5691 } 6735 }
5692 } else if (on) { 6736 } else if (!ret && on) {
5693 spin_lock_irqsave(hba->host->host_lock, flags); 6737 spin_lock_irqsave(hba->host->host_lock, flags);
5694 hba->clk_gating.state = CLKS_ON; 6738 hba->clk_gating.state = CLKS_ON;
6739 trace_ufshcd_clk_gating(dev_name(hba->dev),
6740 hba->clk_gating.state);
5695 spin_unlock_irqrestore(hba->host->host_lock, flags); 6741 spin_unlock_irqrestore(hba->host->host_lock, flags);
5696 } 6742 }
6743
6744 if (clk_state_changed)
6745 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
6746 (on ? "on" : "off"),
6747 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
5697 return ret; 6748 return ret;
5698} 6749}
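
The clk_state_changed flag above uses XOR as a transition detector:
on ^ clki->enabled is true only when the requested state differs from the
clock's current state. Note that the flag is reassigned on every loop
iteration, so the trace decision reflects the last clock in the list, which
is presumably acceptable because the listed clocks are toggled together. The
idiom itself:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            bool on = true;     /* requested state */

            for (int enabled = 0; enabled <= 1; enabled++)
                    printf("on=%d enabled=%d -> transition: %d\n",
                           on, enabled, on ^ enabled);
            return 0;
    }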
5699 6750
@@ -5835,6 +6886,11 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
5835 ufshcd_variant_hba_exit(hba); 6886 ufshcd_variant_hba_exit(hba);
5836 ufshcd_setup_vreg(hba, false); 6887 ufshcd_setup_vreg(hba, false);
5837 ufshcd_suspend_clkscaling(hba); 6888 ufshcd_suspend_clkscaling(hba);
6889 if (ufshcd_is_clkscaling_supported(hba)) {
6890 if (hba->devfreq)
6891 ufshcd_suspend_clkscaling(hba);
6892 destroy_workqueue(hba->clk_scaling.workq);
6893 }
5838 ufshcd_setup_clocks(hba, false); 6894 ufshcd_setup_clocks(hba, false);
5839 ufshcd_setup_hba_vreg(hba, false); 6895 ufshcd_setup_hba_vreg(hba, false);
5840 hba->is_powered = false; 6896 hba->is_powered = false;
@@ -6110,7 +7166,11 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
6110 ufshcd_hold(hba, false); 7166 ufshcd_hold(hba, false);
6111 hba->clk_gating.is_suspended = true; 7167 hba->clk_gating.is_suspended = true;
6112 7168
6113 ufshcd_suspend_clkscaling(hba); 7169 if (hba->clk_scaling.is_allowed) {
7170 cancel_work_sync(&hba->clk_scaling.suspend_work);
7171 cancel_work_sync(&hba->clk_scaling.resume_work);
7172 ufshcd_suspend_clkscaling(hba);
7173 }
6114 7174
6115 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && 7175 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
6116 req_link_state == UIC_LINK_ACTIVE_STATE) { 7176 req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -6176,6 +7236,7 @@ disable_clks:
6176 __ufshcd_setup_clocks(hba, false, true); 7236 __ufshcd_setup_clocks(hba, false, true);
6177 7237
6178 hba->clk_gating.state = CLKS_OFF; 7238 hba->clk_gating.state = CLKS_OFF;
7239 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
6179 /* 7240 /*
6180 * Disable the host irq as there won't be any host controller 7241 * Disable the host irq as there won't be any host controller
6181 * transaction expected till resume. 7242 * transaction expected till resume.
@@ -6186,7 +7247,8 @@ disable_clks:
6186 goto out; 7247 goto out;
6187 7248
6188set_link_active: 7249set_link_active:
6189 ufshcd_resume_clkscaling(hba); 7250 if (hba->clk_scaling.is_allowed)
7251 ufshcd_resume_clkscaling(hba);
6190 ufshcd_vreg_set_hpm(hba); 7252 ufshcd_vreg_set_hpm(hba);
6191 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) 7253 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
6192 ufshcd_set_link_active(hba); 7254 ufshcd_set_link_active(hba);
@@ -6196,7 +7258,8 @@ set_dev_active:
6196 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) 7258 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
6197 ufshcd_disable_auto_bkops(hba); 7259 ufshcd_disable_auto_bkops(hba);
6198enable_gating: 7260enable_gating:
6199 ufshcd_resume_clkscaling(hba); 7261 if (hba->clk_scaling.is_allowed)
7262 ufshcd_resume_clkscaling(hba);
6200 hba->clk_gating.is_suspended = false; 7263 hba->clk_gating.is_suspended = false;
6201 ufshcd_release(hba); 7264 ufshcd_release(hba);
6202out: 7265out:
@@ -6268,14 +7331,19 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
6268 goto set_old_link_state; 7331 goto set_old_link_state;
6269 } 7332 }
6270 7333
6271 /* 7334 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
6272 * If BKOPs operations are urgently needed at this moment then 7335 ufshcd_enable_auto_bkops(hba);
6273 * keep auto-bkops enabled or else disable it. 7336 else
6274 */ 7337 /*
6275 ufshcd_urgent_bkops(hba); 7338 * If BKOPs operations are urgently needed at this moment then
7339 * keep auto-bkops enabled or else disable it.
7340 */
7341 ufshcd_urgent_bkops(hba);
7342
6276 hba->clk_gating.is_suspended = false; 7343 hba->clk_gating.is_suspended = false;
6277 7344
6278 ufshcd_resume_clkscaling(hba); 7345 if (hba->clk_scaling.is_allowed)
7346 ufshcd_resume_clkscaling(hba);
6279 7347
6280 /* Schedule clock gating in case of no access to UFS device yet */ 7348 /* Schedule clock gating in case of no access to UFS device yet */
6281 ufshcd_release(hba); 7349 ufshcd_release(hba);
@@ -6289,7 +7357,8 @@ disable_vreg:
6289 ufshcd_vreg_set_lpm(hba); 7357 ufshcd_vreg_set_lpm(hba);
6290disable_irq_and_vops_clks: 7358disable_irq_and_vops_clks:
6291 ufshcd_disable_irq(hba); 7359 ufshcd_disable_irq(hba);
6292 ufshcd_suspend_clkscaling(hba); 7360 if (hba->clk_scaling.is_allowed)
7361 ufshcd_suspend_clkscaling(hba);
6293 ufshcd_setup_clocks(hba, false); 7362 ufshcd_setup_clocks(hba, false);
6294out: 7363out:
6295 hba->pm_op_in_progress = 0; 7364 hba->pm_op_in_progress = 0;
@@ -6308,6 +7377,7 @@ out:
6308int ufshcd_system_suspend(struct ufs_hba *hba) 7377int ufshcd_system_suspend(struct ufs_hba *hba)
6309{ 7378{
6310 int ret = 0; 7379 int ret = 0;
7380 ktime_t start = ktime_get();
6311 7381
6312 if (!hba || !hba->is_powered) 7382 if (!hba || !hba->is_powered)
6313 return 0; 7383 return 0;
@@ -6334,6 +7404,9 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
6334 7404
6335 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); 7405 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
6336out: 7406out:
7407 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
7408 ktime_to_us(ktime_sub(ktime_get(), start)),
7409 hba->curr_dev_pwr_mode, hba->uic_link_state);
6337 if (!ret) 7410 if (!ret)
6338 hba->is_sys_suspended = true; 7411 hba->is_sys_suspended = true;
6339 return ret; 7412 return ret;
@@ -6349,6 +7422,9 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
6349 7422
6350int ufshcd_system_resume(struct ufs_hba *hba) 7423int ufshcd_system_resume(struct ufs_hba *hba)
6351{ 7424{
7425 int ret = 0;
7426 ktime_t start = ktime_get();
7427
6352 if (!hba) 7428 if (!hba)
6353 return -EINVAL; 7429 return -EINVAL;
6354 7430
@@ -6357,9 +7433,14 @@ int ufshcd_system_resume(struct ufs_hba *hba)
6357 * Let the runtime resume take care of resuming 7433 * Let the runtime resume take care of resuming
6358 * if runtime suspended. 7434 * if runtime suspended.
6359 */ 7435 */
6360 return 0; 7436 goto out;
6361 7437 else
6362 return ufshcd_resume(hba, UFS_SYSTEM_PM); 7438 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
7439out:
7440 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
7441 ktime_to_us(ktime_sub(ktime_get(), start)),
7442 hba->curr_dev_pwr_mode, hba->uic_link_state);
7443 return ret;
6363} 7444}
6364EXPORT_SYMBOL(ufshcd_system_resume); 7445EXPORT_SYMBOL(ufshcd_system_resume);
6365 7446
@@ -6373,13 +7454,21 @@ EXPORT_SYMBOL(ufshcd_system_resume);
6373 */ 7454 */
6374int ufshcd_runtime_suspend(struct ufs_hba *hba) 7455int ufshcd_runtime_suspend(struct ufs_hba *hba)
6375{ 7456{
7457 int ret = 0;
7458 ktime_t start = ktime_get();
7459
6376 if (!hba) 7460 if (!hba)
6377 return -EINVAL; 7461 return -EINVAL;
6378 7462
6379 if (!hba->is_powered) 7463 if (!hba->is_powered)
6380 return 0; 7464 goto out;
6381 7465 else
6382 return ufshcd_suspend(hba, UFS_RUNTIME_PM); 7466 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
7467out:
7468 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
7469 ktime_to_us(ktime_sub(ktime_get(), start)),
7470 hba->curr_dev_pwr_mode, hba->uic_link_state);
7471 return ret;
6383} 7472}
6384EXPORT_SYMBOL(ufshcd_runtime_suspend); 7473EXPORT_SYMBOL(ufshcd_runtime_suspend);
6385 7474
@@ -6406,13 +7495,21 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
6406 */ 7495 */
6407int ufshcd_runtime_resume(struct ufs_hba *hba) 7496int ufshcd_runtime_resume(struct ufs_hba *hba)
6408{ 7497{
7498 int ret = 0;
7499 ktime_t start = ktime_get();
7500
6409 if (!hba) 7501 if (!hba)
6410 return -EINVAL; 7502 return -EINVAL;
6411 7503
6412 if (!hba->is_powered) 7504 if (!hba->is_powered)
6413 return 0; 7505 goto out;
6414 7506 else
6415 return ufshcd_resume(hba, UFS_RUNTIME_PM); 7507 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
7508out:
7509 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
7510 ktime_to_us(ktime_sub(ktime_get(), start)),
7511 hba->curr_dev_pwr_mode, hba->uic_link_state);
7512 return ret;
6416} 7513}
6417EXPORT_SYMBOL(ufshcd_runtime_resume); 7514EXPORT_SYMBOL(ufshcd_runtime_resume);
6418 7515
@@ -6422,6 +7519,127 @@ int ufshcd_runtime_idle(struct ufs_hba *hba)
6422} 7519}
6423EXPORT_SYMBOL(ufshcd_runtime_idle); 7520EXPORT_SYMBOL(ufshcd_runtime_idle);
6424 7521
7522static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
7523 struct device_attribute *attr,
7524 const char *buf, size_t count,
7525 bool rpm)
7526{
7527 struct ufs_hba *hba = dev_get_drvdata(dev);
7528 unsigned long flags, value;
7529
7530 if (kstrtoul(buf, 0, &value))
7531 return -EINVAL;
7532
7533 if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX))
7534 return -EINVAL;
7535
7536 spin_lock_irqsave(hba->host->host_lock, flags);
7537 if (rpm)
7538 hba->rpm_lvl = value;
7539 else
7540 hba->spm_lvl = value;
7541 spin_unlock_irqrestore(hba->host->host_lock, flags);
7542 return count;
7543}
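
The shared store helper above is the standard bounded-parse recipe: kstrtoul
with base 0 (so decimal, octal, and hex input all work), a range check
against the PM-level enum, then publication under the host lock. The
parse-and-validate half in userspace C (the UFS_PM_LVL_MAX value is assumed
for illustration):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define UFS_PM_LVL_MAX 6    /* illustrative bound */

    static int parse_pm_lvl(const char *buf, unsigned long *out)
    {
            char *end;
            unsigned long value = strtoul(buf, &end, 0); /* base 0, like kstrtoul(buf, 0, ...) */

            if (end == buf || value >= UFS_PM_LVL_MAX)
                    return -EINVAL;
            *out = value;
            return 0;
    }

    int main(void)
    {
            unsigned long lvl;

            printf("\"3\" -> %d\n", parse_pm_lvl("3", &lvl));   /* 0, lvl = 3 */
            printf("\"9\" -> %d\n", parse_pm_lvl("9", &lvl));   /* -EINVAL */
            return 0;
    }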
7544
7545static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
7546 struct device_attribute *attr, char *buf)
7547{
7548 struct ufs_hba *hba = dev_get_drvdata(dev);
7549 int curr_len;
7550 u8 lvl;
7551
7552 curr_len = snprintf(buf, PAGE_SIZE,
7553 "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
7554 hba->rpm_lvl,
7555 ufschd_ufs_dev_pwr_mode_to_string(
7556 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
7557 ufschd_uic_link_state_to_string(
7558 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
7559
7560 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7561 "\nAll available Runtime PM levels info:\n");
7562 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
7563 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7564 "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
7565 lvl,
7566 ufschd_ufs_dev_pwr_mode_to_string(
7567 ufs_pm_lvl_states[lvl].dev_state),
7568 ufschd_uic_link_state_to_string(
7569 ufs_pm_lvl_states[lvl].link_state));
7570
7571 return curr_len;
7572}
7573
7574static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
7575 struct device_attribute *attr, const char *buf, size_t count)
7576{
7577 return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
7578}
7579
7580static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
7581{
7582 hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
7583 hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
7584 sysfs_attr_init(&hba->rpm_lvl_attr.attr);
7585 hba->rpm_lvl_attr.attr.name = "rpm_lvl";
7586 hba->rpm_lvl_attr.attr.mode = 0644;
7587 if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
7588 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
7589}
7590
7591static ssize_t ufshcd_spm_lvl_show(struct device *dev,
7592 struct device_attribute *attr, char *buf)
7593{
7594 struct ufs_hba *hba = dev_get_drvdata(dev);
7595 int curr_len;
7596 u8 lvl;
7597
7598 curr_len = snprintf(buf, PAGE_SIZE,
7599 "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
7600 hba->spm_lvl,
7601 ufschd_ufs_dev_pwr_mode_to_string(
7602 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
7603 ufschd_uic_link_state_to_string(
7604 ufs_pm_lvl_states[hba->spm_lvl].link_state));
7605
7606 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7607 "\nAll available System PM levels info:\n");
7608 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
7609 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7610 "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
7611 lvl,
7612 ufschd_ufs_dev_pwr_mode_to_string(
7613 ufs_pm_lvl_states[lvl].dev_state),
7614 ufschd_uic_link_state_to_string(
7615 ufs_pm_lvl_states[lvl].link_state));
7616
7617 return curr_len;
7618}
7619
7620static ssize_t ufshcd_spm_lvl_store(struct device *dev,
7621 struct device_attribute *attr, const char *buf, size_t count)
7622{
7623 return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
7624}
7625
7626static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
7627{
7628 hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
7629 hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
7630 sysfs_attr_init(&hba->spm_lvl_attr.attr);
7631 hba->spm_lvl_attr.attr.name = "spm_lvl";
7632 hba->spm_lvl_attr.attr.mode = 0644;
7633 if (device_create_file(hba->dev, &hba->spm_lvl_attr))
7634 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
7635}
7636
7637static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
7638{
7639 ufshcd_add_rpm_lvl_sysfs_nodes(hba);
7640 ufshcd_add_spm_lvl_sysfs_nodes(hba);
7641}
7642
6425/** 7643/**
6426 * ufshcd_shutdown - shutdown routine 7644 * ufshcd_shutdown - shutdown routine
6427 * @hba: per adapter instance 7645 * @hba: per adapter instance
@@ -6465,6 +7683,8 @@ void ufshcd_remove(struct ufs_hba *hba)
6465 ufshcd_hba_stop(hba, true); 7683 ufshcd_hba_stop(hba, true);
6466 7684
6467 ufshcd_exit_clk_gating(hba); 7685 ufshcd_exit_clk_gating(hba);
7686 if (ufshcd_is_clkscaling_supported(hba))
7687 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
6468 ufshcd_hba_exit(hba); 7688 ufshcd_hba_exit(hba);
6469} 7689}
6470EXPORT_SYMBOL_GPL(ufshcd_remove); 7690EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6531,149 +7751,6 @@ out_error:
6531} 7751}
6532EXPORT_SYMBOL(ufshcd_alloc_host); 7752EXPORT_SYMBOL(ufshcd_alloc_host);
6533 7753
6534static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
6535{
6536 int ret = 0;
6537 struct ufs_clk_info *clki;
6538 struct list_head *head = &hba->clk_list_head;
6539
6540 if (!head || list_empty(head))
6541 goto out;
6542
6543 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
6544 if (ret)
6545 return ret;
6546
6547 list_for_each_entry(clki, head, list) {
6548 if (!IS_ERR_OR_NULL(clki->clk)) {
6549 if (scale_up && clki->max_freq) {
6550 if (clki->curr_freq == clki->max_freq)
6551 continue;
6552 ret = clk_set_rate(clki->clk, clki->max_freq);
6553 if (ret) {
6554 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6555 __func__, clki->name,
6556 clki->max_freq, ret);
6557 break;
6558 }
6559 clki->curr_freq = clki->max_freq;
6560
6561 } else if (!scale_up && clki->min_freq) {
6562 if (clki->curr_freq == clki->min_freq)
6563 continue;
6564 ret = clk_set_rate(clki->clk, clki->min_freq);
6565 if (ret) {
6566 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6567 __func__, clki->name,
6568 clki->min_freq, ret);
6569 break;
6570 }
6571 clki->curr_freq = clki->min_freq;
6572 }
6573 }
6574 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
6575 clki->name, clk_get_rate(clki->clk));
6576 }
6577
6578 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
6579
6580out:
6581 return ret;
6582}
6583
6584static int ufshcd_devfreq_target(struct device *dev,
6585 unsigned long *freq, u32 flags)
6586{
6587 int err = 0;
6588 struct ufs_hba *hba = dev_get_drvdata(dev);
6589 bool release_clk_hold = false;
6590 unsigned long irq_flags;
6591
6592 if (!ufshcd_is_clkscaling_enabled(hba))
6593 return -EINVAL;
6594
6595 spin_lock_irqsave(hba->host->host_lock, irq_flags);
6596 if (ufshcd_eh_in_progress(hba)) {
6597 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6598 return 0;
6599 }
6600
6601 if (ufshcd_is_clkgating_allowed(hba) &&
6602 (hba->clk_gating.state != CLKS_ON)) {
6603 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
6604 /* hold the vote until the scaling work is completed */
6605 hba->clk_gating.active_reqs++;
6606 release_clk_hold = true;
6607 hba->clk_gating.state = CLKS_ON;
6608 } else {
6609 /*
6610 * Clock gating work seems to be running in parallel
6611 * hence skip scaling work to avoid deadlock between
6612 * current scaling work and gating work.
6613 */
6614 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6615 return 0;
6616 }
6617 }
6618 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6619
6620 if (*freq == UINT_MAX)
6621 err = ufshcd_scale_clks(hba, true);
6622 else if (*freq == 0)
6623 err = ufshcd_scale_clks(hba, false);
6624
6625 spin_lock_irqsave(hba->host->host_lock, irq_flags);
6626 if (release_clk_hold)
6627 __ufshcd_release(hba);
6628 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6629
6630 return err;
6631}
6632
6633static int ufshcd_devfreq_get_dev_status(struct device *dev,
6634 struct devfreq_dev_status *stat)
6635{
6636 struct ufs_hba *hba = dev_get_drvdata(dev);
6637 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
6638 unsigned long flags;
6639
6640 if (!ufshcd_is_clkscaling_enabled(hba))
6641 return -EINVAL;
6642
6643 memset(stat, 0, sizeof(*stat));
6644
6645 spin_lock_irqsave(hba->host->host_lock, flags);
6646 if (!scaling->window_start_t)
6647 goto start_window;
6648
6649 if (scaling->is_busy_started)
6650 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
6651 scaling->busy_start_t));
6652
6653 stat->total_time = jiffies_to_usecs((long)jiffies -
6654 (long)scaling->window_start_t);
6655 stat->busy_time = scaling->tot_busy_t;
6656start_window:
6657 scaling->window_start_t = jiffies;
6658 scaling->tot_busy_t = 0;
6659
6660 if (hba->outstanding_reqs) {
6661 scaling->busy_start_t = ktime_get();
6662 scaling->is_busy_started = true;
6663 } else {
6664 scaling->busy_start_t = 0;
6665 scaling->is_busy_started = false;
6666 }
6667 spin_unlock_irqrestore(hba->host->host_lock, flags);
6668 return 0;
6669}
6670
6671static struct devfreq_dev_profile ufs_devfreq_profile = {
6672 .polling_ms = 100,
6673 .target = ufshcd_devfreq_target,
6674 .get_dev_status = ufshcd_devfreq_get_dev_status,
6675};
6676
6677/** 7754/**
6678 * ufshcd_init - Driver initialization routine 7755 * ufshcd_init - Driver initialization routine
6679 * @hba: per-adapter instance 7756 * @hba: per-adapter instance
@@ -6757,6 +7834,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
6757 /* Initialize mutex for device management commands */ 7834 /* Initialize mutex for device management commands */
6758 mutex_init(&hba->dev_cmd.lock); 7835 mutex_init(&hba->dev_cmd.lock);
6759 7836
7837 init_rwsem(&hba->clk_scaling_lock);
7838
6760 /* Initialize device management tag acquire wait queue */ 7839 /* Initialize device management tag acquire wait queue */
6761 init_waitqueue_head(&hba->dev_cmd.tag_wq); 7840 init_waitqueue_head(&hba->dev_cmd.tag_wq);
6762 7841
@@ -6795,22 +7874,38 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
6795 err = ufshcd_hba_enable(hba); 7874 err = ufshcd_hba_enable(hba);
6796 if (err) { 7875 if (err) {
6797 dev_err(hba->dev, "Host controller enable failed\n"); 7876 dev_err(hba->dev, "Host controller enable failed\n");
7877 ufshcd_print_host_regs(hba);
7878 ufshcd_print_host_state(hba);
6798 goto out_remove_scsi_host; 7879 goto out_remove_scsi_host;
6799 } 7880 }
6800 7881
6801 if (ufshcd_is_clkscaling_enabled(hba)) { 7882 if (ufshcd_is_clkscaling_supported(hba)) {
6802 hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile, 7883 char wq_name[sizeof("ufs_clkscaling_00")];
6803 "simple_ondemand", NULL); 7884
6804 if (IS_ERR(hba->devfreq)) { 7885 INIT_WORK(&hba->clk_scaling.suspend_work,
6805 dev_err(hba->dev, "Unable to register with devfreq %ld\n", 7886 ufshcd_clk_scaling_suspend_work);
6806 PTR_ERR(hba->devfreq)); 7887 INIT_WORK(&hba->clk_scaling.resume_work,
6807 err = PTR_ERR(hba->devfreq); 7888 ufshcd_clk_scaling_resume_work);
6808 goto out_remove_scsi_host; 7889
6809 } 7890 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
6810 /* Suspend devfreq until the UFS device is detected */ 7891 host->host_no);
6811 ufshcd_suspend_clkscaling(hba); 7892 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
7893
7894 ufshcd_clkscaling_init_sysfs(hba);
6812 } 7895 }
6813 7896
7897 /*
7898 * Set the default power management level for runtime and system PM.
7899	 * The default power saving mode keeps the UFS link in the Hibern8
7900	 * state and the UFS device in the sleep state.
7901 */
7902 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
7903 UFS_SLEEP_PWR_MODE,
7904 UIC_LINK_HIBERN8_STATE);
7905 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
7906 UFS_SLEEP_PWR_MODE,
7907 UIC_LINK_HIBERN8_STATE);
7908
6814 /* Hold auto suspend until async scan completes */ 7909 /* Hold auto suspend until async scan completes */
6815 pm_runtime_get_sync(dev); 7910 pm_runtime_get_sync(dev);
6816 7911
@@ -6823,6 +7918,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
6823 ufshcd_set_ufs_dev_active(hba); 7918 ufshcd_set_ufs_dev_active(hba);
6824 7919
6825 async_schedule(ufshcd_async_scan, hba); 7920 async_schedule(ufshcd_async_scan, hba);
7921 ufshcd_add_sysfs_nodes(hba);
6826 7922
6827 return 0; 7923 return 0;
6828 7924
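
The rpm_lvl/spm_lvl defaults chosen above map a desired (device state, link state) pair to an enum ufs_pm_level. A hedged sketch of such a lookup, assuming a ufs_pm_lvl_states[] table indexed by level as declared in this header:

static enum ufs_pm_level
example_get_pm_lvl(enum ufs_dev_pwr_mode dev_state,
		   enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
		if (ufs_pm_lvl_states[lvl].dev_state == dev_state &&
		    ufs_pm_lvl_states[lvl].link_state == link_state)
			return lvl;

	/* Illustrative fallback: deepest power-saving level */
	return UFS_PM_LVL_MAX - 1;
}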
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 08cd26ed2382..7630600217a2 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -45,6 +45,7 @@
45#include <linux/delay.h> 45#include <linux/delay.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/spinlock.h> 47#include <linux/spinlock.h>
48#include <linux/rwsem.h>
48#include <linux/workqueue.h> 49#include <linux/workqueue.h>
49#include <linux/errno.h> 50#include <linux/errno.h>
50#include <linux/types.h> 51#include <linux/types.h>
@@ -152,6 +153,10 @@ struct ufs_pm_lvl_states {
152 * @ucd_req_ptr: UCD address of the command 153 * @ucd_req_ptr: UCD address of the command
153 * @ucd_rsp_ptr: Response UPIU address for this command 154 * @ucd_rsp_ptr: Response UPIU address for this command
154 * @ucd_prdt_ptr: PRDT address of the command 155 * @ucd_prdt_ptr: PRDT address of the command
156 * @utrd_dma_addr: UTRD dma address for debug
157 * @ucd_prdt_dma_addr: PRDT dma address for debug
158 * @ucd_rsp_dma_addr: UPIU response dma address for debug
159 * @ucd_req_dma_addr: UPIU request dma address for debug
155 * @cmd: pointer to SCSI command 160 * @cmd: pointer to SCSI command
156 * @sense_buffer: pointer to sense buffer address of the SCSI command 161 * @sense_buffer: pointer to sense buffer address of the SCSI command
157 * @sense_bufflen: Length of the sense buffer 162 * @sense_bufflen: Length of the sense buffer
@@ -160,6 +165,8 @@ struct ufs_pm_lvl_states {
160 * @task_tag: Task tag of the command 165 * @task_tag: Task tag of the command
161 * @lun: LUN of the command 166 * @lun: LUN of the command
162 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation) 167 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
168 * @issue_time_stamp: time stamp for debug purposes
169 * @req_abort_skip: skip request abort task flag
163 */ 170 */
164struct ufshcd_lrb { 171struct ufshcd_lrb {
165 struct utp_transfer_req_desc *utr_descriptor_ptr; 172 struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -167,6 +174,11 @@ struct ufshcd_lrb {
167 struct utp_upiu_rsp *ucd_rsp_ptr; 174 struct utp_upiu_rsp *ucd_rsp_ptr;
168 struct ufshcd_sg_entry *ucd_prdt_ptr; 175 struct ufshcd_sg_entry *ucd_prdt_ptr;
169 176
177 dma_addr_t utrd_dma_addr;
178 dma_addr_t ucd_req_dma_addr;
179 dma_addr_t ucd_rsp_dma_addr;
180 dma_addr_t ucd_prdt_dma_addr;
181
170 struct scsi_cmnd *cmd; 182 struct scsi_cmnd *cmd;
171 u8 *sense_buffer; 183 u8 *sense_buffer;
172 unsigned int sense_bufflen; 184 unsigned int sense_bufflen;
@@ -176,6 +188,9 @@ struct ufshcd_lrb {
176 int task_tag; 188 int task_tag;
177 u8 lun; /* UPIU LUN id field is only 8-bit wide */ 189 u8 lun; /* UPIU LUN id field is only 8-bit wide */
178 bool intr_cmd; 190 bool intr_cmd;
191 ktime_t issue_time_stamp;
192
193 bool req_abort_skip;
179}; 194};
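
The four new dma_addr_t fields cache descriptor bus addresses so error paths can print them without recomputation. A sketch of how they might be derived when a request slot is set up (layout assumptions noted in the comments; the helper is hypothetical):

static void example_fill_debug_addrs(struct ufs_hba *hba,
				     struct ufshcd_lrb *lrb, int tag)
{
	/* Assumes one UTRD and one UCD per tag, laid out as contiguous
	 * arrays starting at utrdl_dma_addr/ucdl_dma_addr, which matches
	 * how this driver allocates its descriptor pools.
	 */
	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
			tag * sizeof(struct utp_transfer_req_desc);
	lrb->ucd_req_dma_addr = hba->ucdl_dma_addr +
			tag * sizeof(struct utp_transfer_cmd_desc);
	lrb->ucd_rsp_dma_addr = lrb->ucd_req_dma_addr +
			offsetof(struct utp_transfer_cmd_desc, response_upiu);
	lrb->ucd_prdt_dma_addr = lrb->ucd_req_dma_addr +
			offsetof(struct utp_transfer_cmd_desc, prd_table);
}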
180 195
181/** 196/**
@@ -320,6 +335,8 @@ enum clk_gating_state {
320 * @is_suspended: clk gating is suspended when set to 1 which can be used 335 * @is_suspended: clk gating is suspended when set to 1 which can be used
321 * during suspend/resume 336 * during suspend/resume
322 * @delay_attr: sysfs attribute to control delay_attr 337 * @delay_attr: sysfs attribute to control delay_attr
338 * @enable_attr: sysfs attribute to enable/disable clock gating
339 * @is_enabled: Indicates the current status of clock gating
323 * @active_reqs: number of requests that are pending and should be waited for 340 * @active_reqs: number of requests that are pending and should be waited for
324 * completion before gating clocks. 341 * completion before gating clocks.
325 */ 342 */
@@ -330,14 +347,47 @@ struct ufs_clk_gating {
330 unsigned long delay_ms; 347 unsigned long delay_ms;
331 bool is_suspended; 348 bool is_suspended;
332 struct device_attribute delay_attr; 349 struct device_attribute delay_attr;
350 struct device_attribute enable_attr;
351 bool is_enabled;
333 int active_reqs; 352 int active_reqs;
334}; 353};
335 354
355struct ufs_saved_pwr_info {
356 struct ufs_pa_layer_attr info;
357 bool is_valid;
358};
359
360/**
361 * struct ufs_clk_scaling - UFS clock scaling related data
362 * @active_reqs: number of requests that are pending. If this is zero when
363 *		the devfreq ->target() function is called, "suspend_work" is
364 *		scheduled to suspend devfreq.
365 * @tot_busy_t: Total busy time in current polling window
366 * @window_start_t: Start time (in jiffies) of the current polling window
367 * @busy_start_t: Start time of current busy period
368 * @enable_attr: sysfs attribute to enable/disable clock scaling
369 * @saved_pwr_info: keeps track of the previous power mode, since the UFS
370 *		power mode may also be changed during scaling.
371 * @workq: workqueue to schedule devfreq suspend/resume work
372 * @suspend_work: worker to suspend devfreq
373 * @resume_work: worker to resume devfreq
374 * @is_allowed: tracks if scaling is currently allowed or not
375 * @is_busy_started: tracks if busy period has started or not
376 * @is_suspended: tracks if devfreq is suspended or not
377 */
336struct ufs_clk_scaling { 378struct ufs_clk_scaling {
337 ktime_t busy_start_t; 379 int active_reqs;
338 bool is_busy_started; 380 unsigned long tot_busy_t;
339 unsigned long tot_busy_t;
340 unsigned long window_start_t; 381 unsigned long window_start_t;
382 ktime_t busy_start_t;
383 struct device_attribute enable_attr;
384 struct ufs_saved_pwr_info saved_pwr_info;
385 struct workqueue_struct *workq;
386 struct work_struct suspend_work;
387 struct work_struct resume_work;
388 bool is_allowed;
389 bool is_busy_started;
390 bool is_suspended;
341}; 391};
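
A sketch of the hand-off these fields enable: devfreq suspend is queued onto a workqueue when the last request completes, instead of being done in the I/O path (the trigger function is hypothetical):

static void example_maybe_suspend_scaling(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	/* Per the kdoc above: zero pending requests means devfreq can
	 * be suspended; the actual suspend runs from scaling->workq.
	 */
	if (!scaling->active_reqs && !scaling->is_suspended)
		queue_work(scaling->workq, &scaling->suspend_work);
}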
342 392
343/** 393/**
@@ -349,6 +399,41 @@ struct ufs_init_prefetch {
349 u32 icc_level; 399 u32 icc_level;
350}; 400};
351 401
402#define UIC_ERR_REG_HIST_LENGTH 8
403/**
404 * struct ufs_uic_err_reg_hist - keeps history of uic errors
405 * @pos: index to indicate cyclic buffer position
406 * @reg: cyclic buffer for registers value
407 * @tstamp: cyclic buffer for time stamp
408 */
409struct ufs_uic_err_reg_hist {
410 int pos;
411 u32 reg[UIC_ERR_REG_HIST_LENGTH];
412 ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
413};
414
415/**
416 * struct ufs_stats - keeps usage/err statistics
417 * @hibern8_exit_cnt: counts the number of hibern8 exits; reset after
418 *		link startup.
419 * @last_hibern8_exit_tstamp: time stamp of the most recent hibern8 exit;
420 *		cleared after the first successful command completion.
421 * @pa_err: tracks pa-uic errors
422 * @dl_err: tracks dl-uic errors
423 * @nl_err: tracks nl-uic errors
424 * @tl_err: tracks tl-uic errors
425 * @dme_err: tracks dme errors
426 */
427struct ufs_stats {
428 u32 hibern8_exit_cnt;
429 ktime_t last_hibern8_exit_tstamp;
430 struct ufs_uic_err_reg_hist pa_err;
431 struct ufs_uic_err_reg_hist dl_err;
432 struct ufs_uic_err_reg_hist nl_err;
433 struct ufs_uic_err_reg_hist tl_err;
434 struct ufs_uic_err_reg_hist dme_err;
435};
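
Recording an entry in the cyclic history above is a one-liner per field; a sketch (the helper name is illustrative):

static void example_update_err_hist(struct ufs_uic_err_reg_hist *hist, u32 reg)
{
	/* pos wraps so the newest UIC_ERR_REG_HIST_LENGTH errors survive */
	hist->reg[hist->pos] = reg;
	hist->tstamp[hist->pos] = ktime_get();
	hist->pos = (hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
}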
436
352/** 437/**
353 * struct ufs_hba - per adapter private structure 438 * struct ufs_hba - per adapter private structure
354 * @mmio_base: UFSHCI base register address 439 * @mmio_base: UFSHCI base register address
@@ -429,6 +514,8 @@ struct ufs_hba {
429 enum ufs_pm_level rpm_lvl; 514 enum ufs_pm_level rpm_lvl;
430 /* Desired UFS power management level during system PM */ 515 /* Desired UFS power management level during system PM */
431 enum ufs_pm_level spm_lvl; 516 enum ufs_pm_level spm_lvl;
517 struct device_attribute rpm_lvl_attr;
518 struct device_attribute spm_lvl_attr;
432 int pm_op_in_progress; 519 int pm_op_in_progress;
433 520
434 struct ufshcd_lrb *lrb; 521 struct ufshcd_lrb *lrb;
@@ -523,6 +610,7 @@ struct ufs_hba {
523 u32 uic_error; 610 u32 uic_error;
524 u32 saved_err; 611 u32 saved_err;
525 u32 saved_uic_err; 612 u32 saved_uic_err;
613 struct ufs_stats ufs_stats;
526 614
527 /* Device management request data */ 615 /* Device management request data */
528 struct ufs_dev_cmd dev_cmd; 616 struct ufs_dev_cmd dev_cmd;
@@ -536,6 +624,9 @@ struct ufs_hba {
536 624
537 bool wlun_dev_clr_ua; 625 bool wlun_dev_clr_ua;
538 626
627 /* Number of request aborts */
628 int req_abort_count;
629
539 /* Number of lanes available (1 or 2) for Rx/Tx */ 630 /* Number of lanes available (1 or 2) for Rx/Tx */
540 u32 lanes_per_direction; 631 u32 lanes_per_direction;
541 struct ufs_pa_layer_attr pwr_info; 632 struct ufs_pa_layer_attr pwr_info;
@@ -558,6 +649,14 @@ struct ufs_hba {
558 * CAUTION: Enabling this might reduce overall UFS throughput. 649 * CAUTION: Enabling this might reduce overall UFS throughput.
559 */ 650 */
560#define UFSHCD_CAP_INTR_AGGR (1 << 4) 651#define UFSHCD_CAP_INTR_AGGR (1 << 4)
652 /*
653	 * This capability allows the device auto-bkops to be always enabled
654	 * except during suspend (both runtime and system suspend).
655	 * Enabling this capability means that the device is always allowed
656	 * to do background operations while it is active, but this might
657	 * degrade the performance of ongoing read/write operations.
658 */
659#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
561 660
562 struct devfreq *devfreq; 661 struct devfreq *devfreq;
563 struct ufs_clk_scaling clk_scaling; 662 struct ufs_clk_scaling clk_scaling;
@@ -565,6 +664,8 @@ struct ufs_hba {
565 664
566 enum bkops_status urgent_bkops_lvl; 665 enum bkops_status urgent_bkops_lvl;
567 bool is_urgent_bkops_lvl_checked; 666 bool is_urgent_bkops_lvl_checked;
667
668 struct rw_semaphore clk_scaling_lock;
568}; 669};
569 670
570/* Returns true if clocks can be gated. Otherwise false */ 671/* Returns true if clocks can be gated. Otherwise false */
@@ -576,7 +677,7 @@ static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
576{ 677{
577 return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; 678 return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
578} 679}
579static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba) 680static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
580{ 681{
581 return hba->caps & UFSHCD_CAP_CLK_SCALING; 682 return hba->caps & UFSHCD_CAP_CLK_SCALING;
582} 683}
@@ -655,6 +756,11 @@ static inline void *ufshcd_get_variant(struct ufs_hba *hba)
655 BUG_ON(!hba); 756 BUG_ON(!hba);
656 return hba->priv; 757 return hba->priv;
657} 758}
759static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
760 struct ufs_hba *hba)
761{
762 return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
763}
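
A sketch of how a suspend path can consult this capability; with the cap set, auto-bkops stays on while the device is active and is only disabled across suspend (ufshcd_disable_auto_bkops() is assumed to be visible here; in the driver it is an internal helper):

static void example_bkops_on_suspend(struct ufs_hba *hba, bool suspending)
{
	if (suspending || !ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_disable_auto_bkops(hba);
}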
658 764
659extern int ufshcd_runtime_suspend(struct ufs_hba *hba); 765extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
660extern int ufshcd_runtime_resume(struct ufs_hba *hba); 766extern int ufshcd_runtime_resume(struct ufs_hba *hba);
@@ -713,8 +819,6 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
713 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); 819 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
714} 820}
715 821
716int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
717
718static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info) 822static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
719{ 823{
720 return (pwr_info->pwr_rx == FAST_MODE || 824 return (pwr_info->pwr_rx == FAST_MODE ||
@@ -723,11 +827,6 @@ static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
723 pwr_info->pwr_tx == FASTAUTO_MODE); 827 pwr_info->pwr_tx == FASTAUTO_MODE);
724} 828}
725 829
726#define ASCII_STD true
727
728int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
729 u32 size, bool ascii);
730
731/* Expose Query-Request API */ 830/* Expose Query-Request API */
732int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, 831int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
733 enum flag_idn idn, bool *flag_res); 832 enum flag_idn idn, bool *flag_res);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 8c5190e2e1c9..d14e9b965d1e 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -72,6 +72,9 @@ enum {
72 REG_UIC_COMMAND_ARG_1 = 0x94, 72 REG_UIC_COMMAND_ARG_1 = 0x94,
73 REG_UIC_COMMAND_ARG_2 = 0x98, 73 REG_UIC_COMMAND_ARG_2 = 0x98,
74 REG_UIC_COMMAND_ARG_3 = 0x9C, 74 REG_UIC_COMMAND_ARG_3 = 0x9C,
75
76 UFSHCI_REG_SPACE_SIZE = 0xA0,
77
75 REG_UFS_CCAP = 0x100, 78 REG_UFS_CCAP = 0x100,
76 REG_UFS_CRYPTOCAP = 0x104, 79 REG_UFS_CRYPTOCAP = 0x104,
77 80
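
UFSHCI_REG_SPACE_SIZE bounds a full host register dump, as used by the error-path prints this series adds (see ufshcd_print_host_regs above); a sketch of such a dump loop:

static void example_dump_host_regs(struct ufs_hba *hba)
{
	int off;

	/* Walk the standard UFSHCI register file in 32-bit steps */
	for (off = 0; off < UFSHCI_REG_SPACE_SIZE; off += 4)
		dev_err(hba->dev, "reg 0x%02x = 0x%08x\n", off,
			ufshcd_readl(hba, off));
}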
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 15ca09cd16f3..ef474a748744 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -68,10 +68,7 @@ struct pvscsi_ctx {
68 68
69struct pvscsi_adapter { 69struct pvscsi_adapter {
70 char *mmioBase; 70 char *mmioBase;
71 unsigned int irq;
72 u8 rev; 71 u8 rev;
73 bool use_msi;
74 bool use_msix;
75 bool use_msg; 72 bool use_msg;
76 bool use_req_threshold; 73 bool use_req_threshold;
77 74
@@ -1161,30 +1158,26 @@ static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
1161static irqreturn_t pvscsi_isr(int irq, void *devp) 1158static irqreturn_t pvscsi_isr(int irq, void *devp)
1162{ 1159{
1163 struct pvscsi_adapter *adapter = devp; 1160 struct pvscsi_adapter *adapter = devp;
1164 int handled; 1161 unsigned long flags;
1165
1166 if (adapter->use_msi || adapter->use_msix)
1167 handled = true;
1168 else {
1169 u32 val = pvscsi_read_intr_status(adapter);
1170 handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
1171 if (handled)
1172 pvscsi_write_intr_status(devp, val);
1173 }
1174
1175 if (handled) {
1176 unsigned long flags;
1177 1162
1178 spin_lock_irqsave(&adapter->hw_lock, flags); 1163 spin_lock_irqsave(&adapter->hw_lock, flags);
1164 pvscsi_process_completion_ring(adapter);
1165 if (adapter->use_msg && pvscsi_msg_pending(adapter))
1166 queue_work(adapter->workqueue, &adapter->work);
1167 spin_unlock_irqrestore(&adapter->hw_lock, flags);
1179 1168
1180 pvscsi_process_completion_ring(adapter); 1169 return IRQ_HANDLED;
1181 if (adapter->use_msg && pvscsi_msg_pending(adapter)) 1170}
1182 queue_work(adapter->workqueue, &adapter->work);
1183 1171
1184 spin_unlock_irqrestore(&adapter->hw_lock, flags); 1172static irqreturn_t pvscsi_shared_isr(int irq, void *devp)
1185 } 1173{
1174 struct pvscsi_adapter *adapter = devp;
1175 u32 val = pvscsi_read_intr_status(adapter);
1186 1176
1187 return IRQ_RETVAL(handled); 1177 if (!(val & PVSCSI_INTR_ALL_SUPPORTED))
1178 return IRQ_NONE;
1179 pvscsi_write_intr_status(devp, val);
1180 return pvscsi_isr(irq, devp);
1188} 1181}
1189 1182
1190static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) 1183static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
@@ -1196,34 +1189,10 @@ static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
1196 free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); 1189 free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
1197} 1190}
1198 1191
1199static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
1200 unsigned int *irq)
1201{
1202 struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
1203 int ret;
1204
1205 ret = pci_enable_msix_exact(adapter->dev, &entry, 1);
1206 if (ret)
1207 return ret;
1208
1209 *irq = entry.vector;
1210
1211 return 0;
1212}
1213
1214static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) 1192static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
1215{ 1193{
1216 if (adapter->irq) { 1194 free_irq(pci_irq_vector(adapter->dev, 0), adapter);
1217 free_irq(adapter->irq, adapter); 1195 pci_free_irq_vectors(adapter->dev);
1218 adapter->irq = 0;
1219 }
1220 if (adapter->use_msi) {
1221 pci_disable_msi(adapter->dev);
1222 adapter->use_msi = 0;
1223 } else if (adapter->use_msix) {
1224 pci_disable_msix(adapter->dev);
1225 adapter->use_msix = 0;
1226 }
1227} 1196}
1228 1197
1229static void pvscsi_release_resources(struct pvscsi_adapter *adapter) 1198static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
@@ -1359,11 +1328,11 @@ exit:
1359 1328
1360static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1329static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1361{ 1330{
1331 unsigned int irq_flag = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY;
1362 struct pvscsi_adapter *adapter; 1332 struct pvscsi_adapter *adapter;
1363 struct pvscsi_adapter adapter_temp; 1333 struct pvscsi_adapter adapter_temp;
1364 struct Scsi_Host *host = NULL; 1334 struct Scsi_Host *host = NULL;
1365 unsigned int i; 1335 unsigned int i;
1366 unsigned long flags = 0;
1367 int error; 1336 int error;
1368 u32 max_id; 1337 u32 max_id;
1369 1338
@@ -1512,30 +1481,33 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1512 goto out_reset_adapter; 1481 goto out_reset_adapter;
1513 } 1482 }
1514 1483
1515 if (!pvscsi_disable_msix && 1484 if (pvscsi_disable_msix)
1516 pvscsi_setup_msix(adapter, &adapter->irq) == 0) { 1485 irq_flag &= ~PCI_IRQ_MSIX;
1517 printk(KERN_INFO "vmw_pvscsi: using MSI-X\n"); 1486 if (pvscsi_disable_msi)
1518 adapter->use_msix = 1; 1487 irq_flag &= ~PCI_IRQ_MSI;
1519 } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) { 1488
1520 printk(KERN_INFO "vmw_pvscsi: using MSI\n"); 1489 error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
1521 adapter->use_msi = 1; 1490 if (error)
1522 adapter->irq = pdev->irq; 1491 goto out_reset_adapter;
1523 } else {
1524 printk(KERN_INFO "vmw_pvscsi: using INTx\n");
1525 adapter->irq = pdev->irq;
1526 flags = IRQF_SHARED;
1527 }
1528 1492
1529 adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true); 1493 adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
1530 printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n", 1494 printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
1531 adapter->use_req_threshold ? "en" : "dis"); 1495 adapter->use_req_threshold ? "en" : "dis");
1532 1496
1533 error = request_irq(adapter->irq, pvscsi_isr, flags, 1497 if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) {
1534 "vmw_pvscsi", adapter); 1498 printk(KERN_INFO "vmw_pvscsi: using MSI%s\n",
1499 adapter->dev->msix_enabled ? "-X" : "");
1500 error = request_irq(pci_irq_vector(pdev, 0), pvscsi_isr,
1501 0, "vmw_pvscsi", adapter);
1502 } else {
1503 printk(KERN_INFO "vmw_pvscsi: using INTx\n");
1504 error = request_irq(pci_irq_vector(pdev, 0), pvscsi_shared_isr,
1505 IRQF_SHARED, "vmw_pvscsi", adapter);
1506 }
1507
1535 if (error) { 1508 if (error) {
1536 printk(KERN_ERR 1509 printk(KERN_ERR
1537 "vmw_pvscsi: unable to request IRQ: %d\n", error); 1510 "vmw_pvscsi: unable to request IRQ: %d\n", error);
1538 adapter->irq = 0;
1539 goto out_reset_adapter; 1511 goto out_reset_adapter;
1540 } 1512 }
1541 1513
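
For context, this is the generic pci_alloc_irq_vectors() pattern the series converts drivers to: one call negotiates MSI-X, MSI, or legacy INTx, and pci_irq_vector() translates a vector index into a Linux IRQ number. A self-contained sketch:

static int example_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
			     void *priv)
{
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1,
			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (ret < 0)
		return ret;

	/* Only legacy INTx can be shared; MSI/MSI-X vectors are exclusive */
	ret = request_irq(pci_irq_vector(pdev, 0), handler,
			  (pdev->msix_enabled || pdev->msi_enabled) ?
			  0 : IRQF_SHARED, "example", priv);
	if (ret)
		pci_free_irq_vectors(pdev);
	return ret;
}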
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index d41292ef85f2..75966d3f326e 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -423,11 +423,6 @@ struct PVSCSIConfigPageController {
423#define PVSCSI_MAX_INTRS 24 423#define PVSCSI_MAX_INTRS 24
424 424
425/* 425/*
426 * Enumeration of supported MSI-X vectors
427 */
428#define PVSCSI_VECTOR_COMPLETION 0
429
430/*
431 * Misc constants for the rings. 426 * Misc constants for the rings.
432 */ 427 */
433 428
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c170be548b7f..46e18c0619c6 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1130,6 +1130,7 @@ extern int ata_sas_port_start(struct ata_port *ap);
1130extern void ata_sas_port_stop(struct ata_port *ap); 1130extern void ata_sas_port_stop(struct ata_port *ap);
1131extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); 1131extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
1132extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); 1132extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
1133extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
1133extern int sata_scr_valid(struct ata_link *link); 1134extern int sata_scr_valid(struct ata_link *link);
1134extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); 1135extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
1135extern int sata_scr_write(struct ata_link *link, int reg, u32 val); 1136extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
@@ -1355,6 +1356,7 @@ extern struct device_attribute *ata_common_sdev_attrs[];
1355 .proc_name = drv_name, \ 1356 .proc_name = drv_name, \
1356 .slave_configure = ata_scsi_slave_config, \ 1357 .slave_configure = ata_scsi_slave_config, \
1357 .slave_destroy = ata_scsi_slave_destroy, \ 1358 .slave_destroy = ata_scsi_slave_destroy, \
1359 .eh_timed_out = ata_scsi_timed_out, \
1358 .bios_param = ata_std_bios_param, \ 1360 .bios_param = ata_std_bios_param, \
1359 .unlock_native_capacity = ata_scsi_unlock_native_capacity, \ 1361 .unlock_native_capacity = ata_scsi_unlock_native_capacity, \
1360 .sdev_attrs = ata_common_sdev_attrs 1362 .sdev_attrs = ata_common_sdev_attrs
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 4d1c46aac331..b0e275de6dec 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -383,6 +383,7 @@ extern int iscsi_eh_recover_target(struct scsi_cmnd *sc);
383extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); 383extern int iscsi_eh_session_reset(struct scsi_cmnd *sc);
384extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); 384extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
385extern int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc); 385extern int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc);
386extern enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc);
386 387
387/* 388/*
388 * iSCSI host helpers. 389 * iSCSI host helpers.
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 8ec7c30e35af..a1e1930b7a87 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -29,16 +29,6 @@ enum scsi_timeouts {
29 */ 29 */
30#define SCAN_WILD_CARD ~0 30#define SCAN_WILD_CARD ~0
31 31
32#ifdef CONFIG_ACPI
33struct acpi_bus_type;
34
35extern int
36scsi_register_acpi_bus_type(struct acpi_bus_type *bus);
37
38extern void
39scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus);
40#endif
41
42/** scsi_status_is_good - check the status return. 32/** scsi_status_is_good - check the status return.
43 * 33 *
44 * @status: the status passed up from the driver (including host and 34 * @status: the status passed up from the driver (including host and
diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h
index b6e07b56d013..a3dcb1bfb362 100644
--- a/include/scsi/scsi_transport.h
+++ b/include/scsi/scsi_transport.h
@@ -56,29 +56,6 @@ struct scsi_transport_template {
56 * Allows a transport to override the default error handler. 56 * Allows a transport to override the default error handler.
57 */ 57 */
58 void (* eh_strategy_handler)(struct Scsi_Host *); 58 void (* eh_strategy_handler)(struct Scsi_Host *);
59
60 /*
61 * This is an optional routine that allows the transport to become
62 * involved when a scsi io timer fires. The return value tells the
63 * timer routine how to finish the io timeout handling:
64 * EH_HANDLED: I fixed the error, please complete the command
65 * EH_RESET_TIMER: I need more time, reset the timer and
66 * begin counting again
67 * EH_NOT_HANDLED Begin normal error recovery
68 */
69 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
70
71 /*
72 * Used as callback for the completion of i_t_nexus request
73 * for target drivers.
74 */
75 int (* it_nexus_response)(struct Scsi_Host *, u64, int);
76
77 /*
78 * Used as callback for the completion of task management
79 * request for target drivers.
80 */
81 int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int);
82}; 59};
83 60
84#define transport_class_to_shost(tc) \ 61#define transport_class_to_shost(tc) \
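
With the transport-level hook removed, timeout handling is wired through the SCSI host template instead; a sketch for an FC LLD using the fc_eh_timed_out() declared later in this series (other fields omitted):

static struct scsi_host_template example_fc_sht = {
	.module		= THIS_MODULE,
	.name		= "example_fc",
	.eh_timed_out	= fc_eh_timed_out,
};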
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 924c8e614b45..b21b8aa58c4d 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -808,6 +808,7 @@ struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
808 struct fc_vport_identifiers *); 808 struct fc_vport_identifiers *);
809int fc_vport_terminate(struct fc_vport *vport); 809int fc_vport_terminate(struct fc_vport *vport);
810int fc_block_scsi_eh(struct scsi_cmnd *cmnd); 810int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
811enum blk_eh_timer_return fc_eh_timed_out(struct scsi_cmnd *scmd);
811 812
812static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job) 813static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job)
813{ 814{
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index d40d3ef25707..dd096330734e 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -88,10 +88,6 @@ struct srp_rport {
88 * @terminate_rport_io: Callback function for terminating all outstanding I/O 88 * @terminate_rport_io: Callback function for terminating all outstanding I/O
89 * requests for an rport. 89 * requests for an rport.
90 * @rport_delete: Callback function that deletes an rport. 90 * @rport_delete: Callback function that deletes an rport.
91 *
92 * Fields that are only relevant for SRP target drivers:
93 * @tsk_mgmt_response: Callback function for sending a task management response.
94 * @it_nexus_response: Callback function for processing an IT nexus response.
95 */ 91 */
96struct srp_function_template { 92struct srp_function_template {
97 /* for initiator drivers */ 93 /* for initiator drivers */
@@ -103,9 +99,6 @@ struct srp_function_template {
103 int (*reconnect)(struct srp_rport *rport); 99 int (*reconnect)(struct srp_rport *rport);
104 void (*terminate_rport_io)(struct srp_rport *rport); 100 void (*terminate_rport_io)(struct srp_rport *rport);
105 void (*rport_delete)(struct srp_rport *rport); 101 void (*rport_delete)(struct srp_rport *rport);
106 /* for target drivers */
107 int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int);
108 int (* it_nexus_response)(struct Scsi_Host *, u64, int);
109}; 102};
110 103
111extern struct scsi_transport_template * 104extern struct scsi_transport_template *
@@ -124,6 +117,7 @@ extern int srp_reconnect_rport(struct srp_rport *rport);
124extern void srp_start_tl_fail_timers(struct srp_rport *rport); 117extern void srp_start_tl_fail_timers(struct srp_rport *rport);
125extern void srp_remove_host(struct Scsi_Host *); 118extern void srp_remove_host(struct Scsi_Host *);
126extern void srp_stop_rport_timers(struct srp_rport *rport); 119extern void srp_stop_rport_timers(struct srp_rport *rport);
120enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd);
127 121
128/** 122/**
129 * srp_chkready() - evaluate the transport layer state before I/O 123 * srp_chkready() - evaluate the transport layer state before I/O
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
new file mode 100644
index 000000000000..bf6f82673492
--- /dev/null
+++ b/include/trace/events/ufs.h
@@ -0,0 +1,263 @@
1/*
2 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#undef TRACE_SYSTEM
15#define TRACE_SYSTEM ufs
16
17#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
18#define _TRACE_UFS_H
19
20#include <linux/tracepoint.h>
21
22#define UFS_LINK_STATES \
23 EM(UIC_LINK_OFF_STATE) \
24 EM(UIC_LINK_ACTIVE_STATE) \
25 EMe(UIC_LINK_HIBERN8_STATE)
26
27#define UFS_PWR_MODES \
28 EM(UFS_ACTIVE_PWR_MODE) \
29 EM(UFS_SLEEP_PWR_MODE) \
30 EMe(UFS_POWERDOWN_PWR_MODE)
31
32#define UFSCHD_CLK_GATING_STATES \
33 EM(CLKS_OFF) \
34 EM(CLKS_ON) \
35 EM(REQ_CLKS_OFF) \
36 EMe(REQ_CLKS_ON)
37
38/* The enums must be exported to userspace so user tools can parse them */
39#undef EM
40#undef EMe
41#define EM(a) TRACE_DEFINE_ENUM(a);
42#define EMe(a) TRACE_DEFINE_ENUM(a);
43
44UFS_LINK_STATES;
45UFS_PWR_MODES;
46UFSCHD_CLK_GATING_STATES;
47
48/*
49 * Now redefine the EM() and EMe() macros to map the enums to the strings
50 * that will be printed in the output.
51 */
52#undef EM
53#undef EMe
54#define EM(a) { a, #a },
55#define EMe(a) { a, #a }
56
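
To make the two-pass trick concrete: after the redefinition above, the state lists expand into symbol tables, e.g.

/* UFSCHD_CLK_GATING_STATES now expands to:
 *   { CLKS_OFF, "CLKS_OFF" }, { CLKS_ON, "CLKS_ON" },
 *   { REQ_CLKS_OFF, "REQ_CLKS_OFF" }, { REQ_CLKS_ON, "REQ_CLKS_ON" }
 * which is the table __print_symbolic() consumes, while the first pass
 * emitted TRACE_DEFINE_ENUM(...) so userspace tools learn the values.
 */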
57TRACE_EVENT(ufshcd_clk_gating,
58
59 TP_PROTO(const char *dev_name, int state),
60
61 TP_ARGS(dev_name, state),
62
63 TP_STRUCT__entry(
64 __string(dev_name, dev_name)
65 __field(int, state)
66 ),
67
68 TP_fast_assign(
69 __assign_str(dev_name, dev_name);
70 __entry->state = state;
71 ),
72
73 TP_printk("%s: gating state changed to %s",
74 __get_str(dev_name),
75 __print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
76);
77
78TRACE_EVENT(ufshcd_clk_scaling,
79
80 TP_PROTO(const char *dev_name, const char *state, const char *clk,
81 u32 prev_state, u32 curr_state),
82
83 TP_ARGS(dev_name, state, clk, prev_state, curr_state),
84
85 TP_STRUCT__entry(
86 __string(dev_name, dev_name)
87 __string(state, state)
88 __string(clk, clk)
89 __field(u32, prev_state)
90 __field(u32, curr_state)
91 ),
92
93 TP_fast_assign(
94 __assign_str(dev_name, dev_name);
95 __assign_str(state, state);
96 __assign_str(clk, clk);
97 __entry->prev_state = prev_state;
98 __entry->curr_state = curr_state;
99 ),
100
101 TP_printk("%s: %s %s from %u to %u Hz",
102 __get_str(dev_name), __get_str(state), __get_str(clk),
103 __entry->prev_state, __entry->curr_state)
104);
105
106TRACE_EVENT(ufshcd_auto_bkops_state,
107
108 TP_PROTO(const char *dev_name, const char *state),
109
110 TP_ARGS(dev_name, state),
111
112 TP_STRUCT__entry(
113 __string(dev_name, dev_name)
114 __string(state, state)
115 ),
116
117 TP_fast_assign(
118 __assign_str(dev_name, dev_name);
119 __assign_str(state, state);
120 ),
121
122 TP_printk("%s: auto bkops - %s",
123 __get_str(dev_name), __get_str(state))
124);
125
126DECLARE_EVENT_CLASS(ufshcd_profiling_template,
127 TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
128 int err),
129
130 TP_ARGS(dev_name, profile_info, time_us, err),
131
132 TP_STRUCT__entry(
133 __string(dev_name, dev_name)
134 __string(profile_info, profile_info)
135 __field(s64, time_us)
136 __field(int, err)
137 ),
138
139 TP_fast_assign(
140 __assign_str(dev_name, dev_name);
141 __assign_str(profile_info, profile_info);
142 __entry->time_us = time_us;
143 __entry->err = err;
144 ),
145
146 TP_printk("%s: %s: took %lld usecs, err %d",
147 __get_str(dev_name), __get_str(profile_info),
148 __entry->time_us, __entry->err)
149);
150
151DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
152 TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
153 int err),
154 TP_ARGS(dev_name, profile_info, time_us, err));
155
156DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
157 TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
158 int err),
159 TP_ARGS(dev_name, profile_info, time_us, err));
160
161DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
162 TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
163 int err),
164 TP_ARGS(dev_name, profile_info, time_us, err));
165
166DECLARE_EVENT_CLASS(ufshcd_template,
167 TP_PROTO(const char *dev_name, int err, s64 usecs,
168 int dev_state, int link_state),
169
170 TP_ARGS(dev_name, err, usecs, dev_state, link_state),
171
172 TP_STRUCT__entry(
173 __field(s64, usecs)
174 __field(int, err)
175 __string(dev_name, dev_name)
176 __field(int, dev_state)
177 __field(int, link_state)
178 ),
179
180 TP_fast_assign(
181 __entry->usecs = usecs;
182 __entry->err = err;
183 __assign_str(dev_name, dev_name);
184 __entry->dev_state = dev_state;
185 __entry->link_state = link_state;
186 ),
187
188 TP_printk(
189 "%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
190 __get_str(dev_name),
191 __entry->usecs,
192 __print_symbolic(__entry->dev_state, UFS_PWR_MODES),
193 __print_symbolic(__entry->link_state, UFS_LINK_STATES),
194 __entry->err
195 )
196);
197
198DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
199 TP_PROTO(const char *dev_name, int err, s64 usecs,
200 int dev_state, int link_state),
201 TP_ARGS(dev_name, err, usecs, dev_state, link_state));
202
203DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
204 TP_PROTO(const char *dev_name, int err, s64 usecs,
205 int dev_state, int link_state),
206 TP_ARGS(dev_name, err, usecs, dev_state, link_state));
207
208DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
209 TP_PROTO(const char *dev_name, int err, s64 usecs,
210 int dev_state, int link_state),
211 TP_ARGS(dev_name, err, usecs, dev_state, link_state));
212
213DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
214 TP_PROTO(const char *dev_name, int err, s64 usecs,
215 int dev_state, int link_state),
216 TP_ARGS(dev_name, err, usecs, dev_state, link_state));
217
218DEFINE_EVENT(ufshcd_template, ufshcd_init,
219 TP_PROTO(const char *dev_name, int err, s64 usecs,
220 int dev_state, int link_state),
221 TP_ARGS(dev_name, err, usecs, dev_state, link_state));
222
223TRACE_EVENT(ufshcd_command,
224 TP_PROTO(const char *dev_name, const char *str, unsigned int tag,
225 u32 doorbell, int transfer_len, u32 intr, u64 lba,
226 u8 opcode),
227
228 TP_ARGS(dev_name, str, tag, doorbell, transfer_len, intr, lba, opcode),
229
230 TP_STRUCT__entry(
231 __string(dev_name, dev_name)
232 __string(str, str)
233 __field(unsigned int, tag)
234 __field(u32, doorbell)
235 __field(int, transfer_len)
236 __field(u32, intr)
237 __field(u64, lba)
238 __field(u8, opcode)
239 ),
240
241 TP_fast_assign(
242 __assign_str(dev_name, dev_name);
243 __assign_str(str, str);
244 __entry->tag = tag;
245 __entry->doorbell = doorbell;
246 __entry->transfer_len = transfer_len;
247 __entry->intr = intr;
248 __entry->lba = lba;
249 __entry->opcode = opcode;
250 ),
251
252 TP_printk(
253 "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x",
254 __get_str(str), __get_str(dev_name), __entry->tag,
255 __entry->doorbell, __entry->transfer_len,
256 __entry->intr, __entry->lba, (u32)__entry->opcode
257 )
258);
259
260#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
261
262/* This part must be outside protection */
263#include <trace/define_trace.h>
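
On the driver side, each definition above generates a trace_<name>() symbol; a sketch of firing the command event at submission time (the wrapper is hypothetical; the register and argument names follow ufshci.h and the TP_PROTO above):

static void example_trace_cmd_send(struct ufs_hba *hba, unsigned int tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];

	trace_ufshcd_command(dev_name(hba->dev), "send", tag,
			ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL),
			scsi_bufflen(lrbp->cmd),
			ufshcd_readl(hba, REG_INTERRUPT_STATUS),
			0 /* lba, omitted in this sketch */,
			lrbp->cmd->cmnd[0]);
}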
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
index 6bf1f8a022b1..e9fdc12ad984 100644
--- a/include/uapi/scsi/cxlflash_ioctl.h
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -40,6 +40,7 @@ struct dk_cxlflash_hdr {
40 */ 40 */
41#define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL 41#define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL
42#define DK_CXLFLASH_APP_CLOSE_ADAP_FD 0x0000000000000002ULL 42#define DK_CXLFLASH_APP_CLOSE_ADAP_FD 0x0000000000000002ULL
43#define DK_CXLFLASH_CONTEXT_SQ_CMD_MODE 0x0000000000000004ULL
43 44
44/* 45/*
45 * General Notes: 46 * General Notes: