aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-09-03 18:48:06 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-03 18:48:06 -0400
commitf66c83d059d1ed90968caa81d401f160912b063a (patch)
tree8558803eadc5c29038de16d88b02b4f6176850ac /drivers/scsi
parentd472d9d98b463dd7a04f2bcdeafe4261686ce6ab (diff)
parent1f962ebcdfa15cede59e9edb299d1330949eec92 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley: "This patch set is a set of driver updates (ufs, zfcp, lpfc, mpt2/3sas, qla4xxx, qla2xxx [adding support for ISP8044 + other things]). We also have a new driver: esas2r which has a number of static checker problems, but which I expect to resolve over the -rc course of 3.12 under the new driver exception. We also have the error return that were discussed at LSF" * tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (118 commits) [SCSI] sg: push file descriptor list locking down to per-device locking [SCSI] sg: checking sdp->detached isn't protected when open [SCSI] sg: no need sg_open_exclusive_lock [SCSI] sg: use rwsem to solve race during exclusive open [SCSI] scsi_debug: fix logical block provisioning support when unmap_alignment != 0 [SCSI] scsi_debug: fix endianness bug in sdebug_build_parts() [SCSI] qla2xxx: Update the driver version to 8.06.00.08-k. [SCSI] qla2xxx: print MAC via %pMR. [SCSI] qla2xxx: Correction to message ids. [SCSI] qla2xxx: Correctly print out/in mailbox registers. [SCSI] qla2xxx: Add a new interface to update versions. [SCSI] qla2xxx: Move queue depth ramp down message to i/o debug level. [SCSI] qla2xxx: Select link initialization option bits from current operating mode. [SCSI] qla2xxx: Add loopback IDC-TIME-EXTEND aen handling support. [SCSI] qla2xxx: Set default critical temperature value in cases when ISPFX00 firmware doesn't provide it [SCSI] qla2xxx: QLAFX00 make over temperature AEN handling informational, add log for normal temperature AEN [SCSI] qla2xxx: Correct Interrupt Register offset for ISPFX00 [SCSI] qla2xxx: Remove handling of Shutdown Requested AEN from qlafx00_process_aen(). [SCSI] qla2xxx: Send all AENs for ISPFx00 to above layers. [SCSI] qla2xxx: Add changes in initialization for ISPFX00 cards with BIOS ...
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/bfa/bfad.c6
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h2
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c6
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c2
-rw-r--r--drivers/scsi/eata_pio.c2
-rw-r--r--drivers/scsi/esas2r/Kconfig5
-rw-r--r--drivers/scsi/esas2r/Makefile5
-rw-r--r--drivers/scsi/esas2r/atioctl.h1254
-rw-r--r--drivers/scsi/esas2r/atvda.h1319
-rw-r--r--drivers/scsi/esas2r/esas2r.h1441
-rw-r--r--drivers/scsi/esas2r/esas2r_disc.c1189
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c1512
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c1773
-rw-r--r--drivers/scsi/esas2r/esas2r_int.c941
-rw-r--r--drivers/scsi/esas2r/esas2r_io.c880
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c2110
-rw-r--r--drivers/scsi/esas2r/esas2r_log.c254
-rw-r--r--drivers/scsi/esas2r/esas2r_log.h118
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2032
-rw-r--r--drivers/scsi/esas2r/esas2r_targdb.c306
-rw-r--r--drivers/scsi/esas2r/esas2r_vda.c521
-rw-r--r--drivers/scsi/hpsa.c8
-rw-r--r--drivers/scsi/hpsa.h2
-rw-r--r--drivers/scsi/ipr.c14
-rw-r--r--drivers/scsi/ipr.h7
-rw-r--r--drivers/scsi/isci/port_config.c2
-rw-r--r--drivers/scsi/libiscsi.c109
-rw-r--r--drivers/scsi/lpfc/lpfc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c57
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h13
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c42
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c147
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c5
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h7
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h10
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_raid.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_sas.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_type.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c40
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h10
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c14
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c82
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c41
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c5
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c5
-rw-r--r--drivers/scsi/qla2xxx/Makefile2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c81
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c43
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c58
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h39
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h74
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c97
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c86
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c91
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c297
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c164
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h41
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c117
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c3716
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.h551
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c213
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c162
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c69
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c14
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c12
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.h36
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c90
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c13
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h29
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h10
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h10
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_inline.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c65
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c171
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c297
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c357
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h4
-rw-r--r--drivers/scsi/scsi_debug.c14
-rw-r--r--drivers/scsi/scsi_error.c128
-rw-r--r--drivers/scsi/scsi_lib.c48
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c123
-rw-r--r--drivers/scsi/sd.c149
-rw-r--r--drivers/scsi/sg.c176
-rw-r--r--drivers/scsi/st.c27
-rw-r--r--drivers/scsi/ufs/ufs.h155
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c99
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c57
-rw-r--r--drivers/scsi/ufs/ufshcd.c1170
-rw-r--r--drivers/scsi/ufs/ufshcd.h59
-rw-r--r--drivers/scsi/ufs/ufshci.h2
127 files changed, 24360 insertions, 1319 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 92ff027746f2..fe25677a5511 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -601,6 +601,7 @@ config SCSI_ARCMSR
601 To compile this driver as a module, choose M here: the 601 To compile this driver as a module, choose M here: the
602 module will be called arcmsr (modprobe arcmsr). 602 module will be called arcmsr (modprobe arcmsr).
603 603
604source "drivers/scsi/esas2r/Kconfig"
604source "drivers/scsi/megaraid/Kconfig.megaraid" 605source "drivers/scsi/megaraid/Kconfig.megaraid"
605source "drivers/scsi/mpt2sas/Kconfig" 606source "drivers/scsi/mpt2sas/Kconfig"
606source "drivers/scsi/mpt3sas/Kconfig" 607source "drivers/scsi/mpt3sas/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b607ba4f5630..149bb6bf1849 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
141obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ 141obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
142obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ 142obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
143obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ 143obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
144obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
144obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o 145obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
145obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o 146obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
146obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o 147obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 9611195d6703..f8ca7becacca 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -63,9 +63,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
63u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; 63u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
64u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; 64u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
65 65
66#define BFAD_FW_FILE_CB "cbfw-3.2.1.0.bin" 66#define BFAD_FW_FILE_CB "cbfw-3.2.1.1.bin"
67#define BFAD_FW_FILE_CT "ctfw-3.2.1.0.bin" 67#define BFAD_FW_FILE_CT "ctfw-3.2.1.1.bin"
68#define BFAD_FW_FILE_CT2 "ct2fw-3.2.1.0.bin" 68#define BFAD_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
69 69
70static u32 *bfad_load_fwimg(struct pci_dev *pdev); 70static u32 *bfad_load_fwimg(struct pci_dev *pdev);
71static void bfad_free_fwimg(void); 71static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 25093a04123b..3d33767f2f2c 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,6 +1,6 @@
1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI 1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
2 * 2 *
3 * Copyright (c) 2006 - 2012 Broadcom Corporation 3 * Copyright (c) 2006 - 2013 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index f2db5fe7bdc2..37049e433c9e 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,6 +1,6 @@
1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. 1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
2 * 2 *
3 * Copyright (c) 2006 - 2012 Broadcom Corporation 3 * Copyright (c) 2006 - 2013 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index f109e3b073c3..6940f0930a84 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2006 - 2012 Broadcom Corporation 3 * Copyright (c) 2006 - 2013 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie 5 * Copyright (c) 2007, 2008 Mike Christie
6 * 6 *
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index a28b03e5a5f6..af3e675d4d48 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2006 - 2012 Broadcom Corporation 3 * Copyright (c) 2006 - 2013 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie 5 * Copyright (c) 2007, 2008 Mike Christie
6 * 6 *
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 50fef6963a81..b6f6f436777b 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2006 - 2012 Broadcom Corporation 3 * Copyright (c) 2006 - 2013 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie 5 * Copyright (c) 2007, 2008 Mike Christie
6 * 6 *
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
18static u32 adapter_count; 18static u32 adapter_count;
19 19
20#define DRV_MODULE_NAME "bnx2i" 20#define DRV_MODULE_NAME "bnx2i"
21#define DRV_MODULE_VERSION "2.7.2.2" 21#define DRV_MODULE_VERSION "2.7.6.2"
22#define DRV_MODULE_RELDATE "Apr 25, 2012" 22#define DRV_MODULE_RELDATE "Jun 06, 2013"
23 23
24static char version[] = 24static char version[] =
25 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 25 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 0056e47bd56e..fabeb88602ac 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. 2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
3 * 3 *
4 * Copyright (c) 2006 - 2012 Broadcom Corporation 4 * Copyright (c) 2006 - 2013 Broadcom Corporation
5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
6 * Copyright (c) 2007, 2008 Mike Christie 6 * Copyright (c) 2007, 2008 Mike Christie
7 * 7 *
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index c61cf7a43658..a0a3d9fe61fe 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,6 +1,6 @@
1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. 1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
2 * 2 *
3 * Copyright (c) 2004 - 2012 Broadcom Corporation 3 * Copyright (c) 2004 - 2013 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 356def44ce58..1663173cdb91 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -919,7 +919,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
919 find_pio_EISA(&gc); 919 find_pio_EISA(&gc);
920 find_pio_ISA(&gc); 920 find_pio_ISA(&gc);
921 921
922 for (i = 0; i <= MAXIRQ; i++) 922 for (i = 0; i < MAXIRQ; i++)
923 if (reg_IRQ[i]) 923 if (reg_IRQ[i])
924 request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL); 924 request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
925 925
diff --git a/drivers/scsi/esas2r/Kconfig b/drivers/scsi/esas2r/Kconfig
new file mode 100644
index 000000000000..78fdbfd9b4b7
--- /dev/null
+++ b/drivers/scsi/esas2r/Kconfig
@@ -0,0 +1,5 @@
1config SCSI_ESAS2R
2 tristate "ATTO Technology's ExpressSAS RAID adapter driver"
3 depends on PCI && SCSI
4 ---help---
5 This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers.
diff --git a/drivers/scsi/esas2r/Makefile b/drivers/scsi/esas2r/Makefile
new file mode 100644
index 000000000000..c77160b8c8bd
--- /dev/null
+++ b/drivers/scsi/esas2r/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_SCSI_ESAS2R) += esas2r.o
2
3esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \
4 esas2r_int.o esas2r_io.o esas2r_ioctl.o esas2r_targdb.o \
5 esas2r_vda.o esas2r_main.o
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
new file mode 100644
index 000000000000..4aca3d52c851
--- /dev/null
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -0,0 +1,1254 @@
1/* linux/drivers/scsi/esas2r/atioctl.h
2 * ATTO IOCTL Handling
3 *
4 * Copyright (c) 2001-2013 ATTO Technology, Inc.
5 * (mailto:linuxdrivers@attotech.com)
6 */
7/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
8/*
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
41 */
42/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
43
44#include "atvda.h"
45
46#ifndef ATIOCTL_H
47#define ATIOCTL_H
48
49#define EXPRESS_IOCTL_SIGNATURE "Express"
50#define EXPRESS_IOCTL_SIGNATURE_SIZE 8
51
52/* structure definitions for IOCTls */
53
54struct __packed atto_express_ioctl_header {
55 u8 signature[EXPRESS_IOCTL_SIGNATURE_SIZE];
56 u8 return_code;
57
58#define IOCTL_SUCCESS 0
59#define IOCTL_ERR_INVCMD 101
60#define IOCTL_INIT_FAILED 102
61#define IOCTL_NOT_IMPLEMENTED 103
62#define IOCTL_BAD_CHANNEL 104
63#define IOCTL_TARGET_OVERRUN 105
64#define IOCTL_TARGET_NOT_ENABLED 106
65#define IOCTL_BAD_FLASH_IMGTYPE 107
66#define IOCTL_OUT_OF_RESOURCES 108
67#define IOCTL_GENERAL_ERROR 109
68#define IOCTL_INVALID_PARAM 110
69
70 u8 channel;
71 u8 retries;
72 u8 pad[5];
73};
74
75/*
76 * NOTE - if channel == 0xFF, the request is
77 * handled on the adapter it came in on.
78 */
79#define MAX_NODE_NAMES 256
80
81struct __packed atto_firmware_rw_request {
82 u8 function;
83 #define FUNC_FW_DOWNLOAD 0x09
84 #define FUNC_FW_UPLOAD 0x12
85
86 u8 img_type;
87 #define FW_IMG_FW 0x01
88 #define FW_IMG_BIOS 0x02
89 #define FW_IMG_NVR 0x03
90 #define FW_IMG_RAW 0x04
91 #define FW_IMG_FM_API 0x05
92 #define FW_IMG_FS_API 0x06
93
94 u8 pad[2];
95 u32 img_offset;
96 u32 img_size;
97 u8 image[0x80000];
98};
99
100struct __packed atto_param_rw_request {
101 u16 code;
102 char data_buffer[512];
103};
104
105#define MAX_CHANNEL 256
106
107struct __packed atto_channel_list {
108 u32 num_channels;
109 u8 channel[MAX_CHANNEL];
110};
111
112struct __packed atto_channel_info {
113 u8 major_rev;
114 u8 minor_rev;
115 u8 IRQ;
116 u8 revision_id;
117 u8 pci_bus;
118 u8 pci_dev_func;
119 u8 core_rev;
120 u8 host_no;
121 u16 device_id;
122 u16 vendor_id;
123 u16 ven_dev_id;
124 u8 pad[3];
125 u32 hbaapi_rev;
126};
127
128/*
129 * CSMI control codes
130 * class independent
131 */
132#define CSMI_CC_GET_DRVR_INFO 1
133#define CSMI_CC_GET_CNTLR_CFG 2
134#define CSMI_CC_GET_CNTLR_STS 3
135#define CSMI_CC_FW_DOWNLOAD 4
136
137/* RAID class */
138#define CSMI_CC_GET_RAID_INFO 10
139#define CSMI_CC_GET_RAID_CFG 11
140
141/* HBA class */
142#define CSMI_CC_GET_PHY_INFO 20
143#define CSMI_CC_SET_PHY_INFO 21
144#define CSMI_CC_GET_LINK_ERRORS 22
145#define CSMI_CC_SMP_PASSTHRU 23
146#define CSMI_CC_SSP_PASSTHRU 24
147#define CSMI_CC_STP_PASSTHRU 25
148#define CSMI_CC_GET_SATA_SIG 26
149#define CSMI_CC_GET_SCSI_ADDR 27
150#define CSMI_CC_GET_DEV_ADDR 28
151#define CSMI_CC_TASK_MGT 29
152#define CSMI_CC_GET_CONN_INFO 30
153
154/* PHY class */
155#define CSMI_CC_PHY_CTRL 60
156
157/*
158 * CSMI status codes
159 * class independent
160 */
161#define CSMI_STS_SUCCESS 0
162#define CSMI_STS_FAILED 1
163#define CSMI_STS_BAD_CTRL_CODE 2
164#define CSMI_STS_INV_PARAM 3
165#define CSMI_STS_WRITE_ATTEMPTED 4
166
167/* RAID class */
168#define CSMI_STS_INV_RAID_SET 1000
169
170/* HBA class */
171#define CSMI_STS_PHY_CHANGED CSMI_STS_SUCCESS
172#define CSMI_STS_PHY_UNCHANGEABLE 2000
173#define CSMI_STS_INV_LINK_RATE 2001
174#define CSMI_STS_INV_PHY 2002
175#define CSMI_STS_INV_PHY_FOR_PORT 2003
176#define CSMI_STS_PHY_UNSELECTABLE 2004
177#define CSMI_STS_SELECT_PHY_OR_PORT 2005
178#define CSMI_STS_INV_PORT 2006
179#define CSMI_STS_PORT_UNSELECTABLE 2007
180#define CSMI_STS_CONNECTION_FAILED 2008
181#define CSMI_STS_NO_SATA_DEV 2009
182#define CSMI_STS_NO_SATA_SIGNATURE 2010
183#define CSMI_STS_SCSI_EMULATION 2011
184#define CSMI_STS_NOT_AN_END_DEV 2012
185#define CSMI_STS_NO_SCSI_ADDR 2013
186#define CSMI_STS_NO_DEV_ADDR 2014
187
188/* CSMI class independent structures */
189struct atto_csmi_get_driver_info {
190 char name[81];
191 char description[81];
192 u16 major_rev;
193 u16 minor_rev;
194 u16 build_rev;
195 u16 release_rev;
196 u16 csmi_major_rev;
197 u16 csmi_minor_rev;
198 #define CSMI_MAJOR_REV_0_81 0
199 #define CSMI_MINOR_REV_0_81 81
200
201 #define CSMI_MAJOR_REV CSMI_MAJOR_REV_0_81
202 #define CSMI_MINOR_REV CSMI_MINOR_REV_0_81
203};
204
205struct atto_csmi_get_pci_bus_addr {
206 u8 bus_num;
207 u8 device_num;
208 u8 function_num;
209 u8 reserved;
210};
211
212struct atto_csmi_get_cntlr_cfg {
213 u32 base_io_addr;
214
215 struct {
216 u32 base_memaddr_lo;
217 u32 base_memaddr_hi;
218 };
219
220 u32 board_id;
221 u16 slot_num;
222 #define CSMI_SLOT_NUM_UNKNOWN 0xFFFF
223
224 u8 cntlr_class;
225 #define CSMI_CNTLR_CLASS_HBA 5
226
227 u8 io_bus_type;
228 #define CSMI_BUS_TYPE_PCI 3
229 #define CSMI_BUS_TYPE_PCMCIA 4
230
231 union {
232 struct atto_csmi_get_pci_bus_addr pci_addr;
233 u8 reserved[32];
234 };
235
236 char serial_num[81];
237 u16 major_rev;
238 u16 minor_rev;
239 u16 build_rev;
240 u16 release_rev;
241 u16 bios_major_rev;
242 u16 bios_minor_rev;
243 u16 bios_build_rev;
244 u16 bios_release_rev;
245 u32 cntlr_flags;
246 #define CSMI_CNTLRF_SAS_HBA 0x00000001
247 #define CSMI_CNTLRF_SAS_RAID 0x00000002
248 #define CSMI_CNTLRF_SATA_HBA 0x00000004
249 #define CSMI_CNTLRF_SATA_RAID 0x00000008
250 #define CSMI_CNTLRF_FWD_SUPPORT 0x00010000
251 #define CSMI_CNTLRF_FWD_ONLINE 0x00020000
252 #define CSMI_CNTLRF_FWD_SRESET 0x00040000
253 #define CSMI_CNTLRF_FWD_HRESET 0x00080000
254 #define CSMI_CNTLRF_FWD_RROM 0x00100000
255
256 u16 rrom_major_rev;
257 u16 rrom_minor_rev;
258 u16 rrom_build_rev;
259 u16 rrom_release_rev;
260 u16 rrom_biosmajor_rev;
261 u16 rrom_biosminor_rev;
262 u16 rrom_biosbuild_rev;
263 u16 rrom_biosrelease_rev;
264 u8 reserved2[7];
265};
266
267struct atto_csmi_get_cntlr_sts {
268 u32 status;
269 #define CSMI_CNTLR_STS_GOOD 1
270 #define CSMI_CNTLR_STS_FAILED 2
271 #define CSMI_CNTLR_STS_OFFLINE 3
272 #define CSMI_CNTLR_STS_POWEROFF 4
273
274 u32 offline_reason;
275 #define CSMI_OFFLINE_NO_REASON 0
276 #define CSMI_OFFLINE_INITIALIZING 1
277 #define CSMI_OFFLINE_BUS_DEGRADED 2
278 #define CSMI_OFFLINE_BUS_FAILURE 3
279
280 u8 reserved[28];
281};
282
283struct atto_csmi_fw_download {
284 u32 buffer_len;
285 u32 download_flags;
286 #define CSMI_FWDF_VALIDATE 0x00000001
287 #define CSMI_FWDF_SOFT_RESET 0x00000002
288 #define CSMI_FWDF_HARD_RESET 0x00000004
289
290 u8 reserved[32];
291 u16 status;
292 #define CSMI_FWD_STS_SUCCESS 0
293 #define CSMI_FWD_STS_FAILED 1
294 #define CSMI_FWD_STS_USING_RROM 2
295 #define CSMI_FWD_STS_REJECT 3
296 #define CSMI_FWD_STS_DOWNREV 4
297
298 u16 severity;
299 #define CSMI_FWD_SEV_INFO 0
300 #define CSMI_FWD_SEV_WARNING 1
301 #define CSMI_FWD_SEV_ERROR 2
302 #define CSMI_FWD_SEV_FATAL 3
303
304};
305
306/* CSMI RAID class structures */
307struct atto_csmi_get_raid_info {
308 u32 num_raid_sets;
309 u32 max_drivesper_set;
310 u8 reserved[92];
311};
312
313struct atto_csmi_raid_drives {
314 char model[40];
315 char firmware[8];
316 char serial_num[40];
317 u8 sas_addr[8];
318 u8 lun[8];
319 u8 drive_sts;
320 #define CSMI_DRV_STS_OK 0
321 #define CSMI_DRV_STS_REBUILDING 1
322 #define CSMI_DRV_STS_FAILED 2
323 #define CSMI_DRV_STS_DEGRADED 3
324
325 u8 drive_usage;
326 #define CSMI_DRV_USE_NOT_USED 0
327 #define CSMI_DRV_USE_MEMBER 1
328 #define CSMI_DRV_USE_SPARE 2
329
330 u8 reserved[30]; /* spec says 22 */
331};
332
333struct atto_csmi_get_raid_cfg {
334 u32 raid_set_index;
335 u32 capacity;
336 u32 stripe_size;
337 u8 raid_type;
338 u8 status;
339 u8 information;
340 u8 drive_cnt;
341 u8 reserved[20];
342
343 struct atto_csmi_raid_drives drives[1];
344};
345
346/* CSMI HBA class structures */
347struct atto_csmi_phy_entity {
348 u8 ident_frame[0x1C];
349 u8 port_id;
350 u8 neg_link_rate;
351 u8 min_link_rate;
352 u8 max_link_rate;
353 u8 phy_change_cnt;
354 u8 auto_discover;
355 #define CSMI_DISC_NOT_SUPPORTED 0x00
356 #define CSMI_DISC_NOT_STARTED 0x01
357 #define CSMI_DISC_IN_PROGRESS 0x02
358 #define CSMI_DISC_COMPLETE 0x03
359 #define CSMI_DISC_ERROR 0x04
360
361 u8 reserved[2];
362 u8 attach_ident_frame[0x1C];
363};
364
365struct atto_csmi_get_phy_info {
366 u8 number_of_phys;
367 u8 reserved[3];
368 struct atto_csmi_phy_entity
369 phy[32];
370};
371
372struct atto_csmi_set_phy_info {
373 u8 phy_id;
374 u8 neg_link_rate;
375 #define CSMI_NEG_RATE_NEGOTIATE 0x00
376 #define CSMI_NEG_RATE_PHY_DIS 0x01
377
378 u8 prog_minlink_rate;
379 u8 prog_maxlink_rate;
380 u8 signal_class;
381 #define CSMI_SIG_CLASS_UNKNOWN 0x00
382 #define CSMI_SIG_CLASS_DIRECT 0x01
383 #define CSMI_SIG_CLASS_SERVER 0x02
384 #define CSMI_SIG_CLASS_ENCLOSURE 0x03
385
386 u8 reserved[3];
387};
388
389struct atto_csmi_get_link_errors {
390 u8 phy_id;
391 u8 reset_cnts;
392 #define CSMI_RESET_CNTS_NO 0x00
393 #define CSMI_RESET_CNTS_YES 0x01
394
395 u8 reserved[2];
396 u32 inv_dw_cnt;
397 u32 disp_err_cnt;
398 u32 loss_ofdw_sync_cnt;
399 u32 phy_reseterr_cnt;
400
401 /*
402 * The following field has been added by ATTO for ease of
403 * implementation of additional statistics. Drivers must validate
404 * the length of the IOCTL payload prior to filling them in so CSMI
405 * complaint applications function correctly.
406 */
407
408 u32 crc_err_cnt;
409};
410
411struct atto_csmi_smp_passthru {
412 u8 phy_id;
413 u8 port_id;
414 u8 conn_rate;
415 u8 reserved;
416 u8 dest_sas_addr[8];
417 u32 req_len;
418 u8 smp_req[1020];
419 u8 conn_sts;
420 u8 reserved2[3];
421 u32 rsp_len;
422 u8 smp_rsp[1020];
423};
424
425struct atto_csmi_ssp_passthru_sts {
426 u8 conn_sts;
427 u8 reserved[3];
428 u8 data_present;
429 u8 status;
430 u16 rsp_length;
431 u8 rsp[256];
432 u32 data_bytes;
433};
434
435struct atto_csmi_ssp_passthru {
436 u8 phy_id;
437 u8 port_id;
438 u8 conn_rate;
439 u8 reserved;
440 u8 dest_sas_addr[8];
441 u8 lun[8];
442 u8 cdb_len;
443 u8 add_cdb_len;
444 u8 reserved2[2];
445 u8 cdb[16];
446 u32 flags;
447 #define CSMI_SSPF_DD_READ 0x00000001
448 #define CSMI_SSPF_DD_WRITE 0x00000002
449 #define CSMI_SSPF_DD_UNSPECIFIED 0x00000004
450 #define CSMI_SSPF_TA_SIMPLE 0x00000000
451 #define CSMI_SSPF_TA_HEAD_OF_Q 0x00000010
452 #define CSMI_SSPF_TA_ORDERED 0x00000020
453 #define CSMI_SSPF_TA_ACA 0x00000040
454
455 u8 add_cdb[24];
456 u32 data_len;
457
458 struct atto_csmi_ssp_passthru_sts sts;
459};
460
461struct atto_csmi_stp_passthru_sts {
462 u8 conn_sts;
463 u8 reserved[3];
464 u8 sts_fis[20];
465 u32 scr[16];
466 u32 data_bytes;
467};
468
469struct atto_csmi_stp_passthru {
470 u8 phy_id;
471 u8 port_id;
472 u8 conn_rate;
473 u8 reserved;
474 u8 dest_sas_addr[8];
475 u8 reserved2[4];
476 u8 command_fis[20];
477 u32 flags;
478 #define CSMI_STPF_DD_READ 0x00000001
479 #define CSMI_STPF_DD_WRITE 0x00000002
480 #define CSMI_STPF_DD_UNSPECIFIED 0x00000004
481 #define CSMI_STPF_PIO 0x00000010
482 #define CSMI_STPF_DMA 0x00000020
483 #define CSMI_STPF_PACKET 0x00000040
484 #define CSMI_STPF_DMA_QUEUED 0x00000080
485 #define CSMI_STPF_EXECUTE_DIAG 0x00000100
486 #define CSMI_STPF_RESET_DEVICE 0x00000200
487
488 u32 data_len;
489
490 struct atto_csmi_stp_passthru_sts sts;
491};
492
493struct atto_csmi_get_sata_sig {
494 u8 phy_id;
495 u8 reserved[3];
496 u8 reg_dth_fis[20];
497};
498
499struct atto_csmi_get_scsi_addr {
500 u8 sas_addr[8];
501 u8 sas_lun[8];
502 u8 host_index;
503 u8 path_id;
504 u8 target_id;
505 u8 lun;
506};
507
508struct atto_csmi_get_dev_addr {
509 u8 host_index;
510 u8 path_id;
511 u8 target_id;
512 u8 lun;
513 u8 sas_addr[8];
514 u8 sas_lun[8];
515};
516
517struct atto_csmi_task_mgmt {
518 u8 host_index;
519 u8 path_id;
520 u8 target_id;
521 u8 lun;
522 u32 flags;
523 #define CSMI_TMF_TASK_IU 0x00000001
524 #define CSMI_TMF_HARD_RST 0x00000002
525 #define CSMI_TMF_SUPPRESS_RSLT 0x00000004
526
527 u32 queue_tag;
528 u32 reserved;
529 u8 task_mgt_func;
530 u8 reserved2[7];
531 u32 information;
532 #define CSMI_TM_INFO_TEST 1
533 #define CSMI_TM_INFO_EXCEEDED 2
534 #define CSMI_TM_INFO_DEMAND 3
535 #define CSMI_TM_INFO_TRIGGER 4
536
537 struct atto_csmi_ssp_passthru_sts sts;
538
539};
540
/* CSMI payload: describes one physical connector — pinout (SFF connector
 * type, one bit per lane), a 16-byte name, and its location flags. */
541struct atto_csmi_get_conn_info {
542 u32 pinout;
543 #define CSMI_CON_UNKNOWN 0x00000001
544 #define CSMI_CON_SFF_8482 0x00000002
545 #define CSMI_CON_SFF_8470_LANE_1 0x00000100
546 #define CSMI_CON_SFF_8470_LANE_2 0x00000200
547 #define CSMI_CON_SFF_8470_LANE_3 0x00000400
548 #define CSMI_CON_SFF_8470_LANE_4 0x00000800
549 #define CSMI_CON_SFF_8484_LANE_1 0x00010000
550 #define CSMI_CON_SFF_8484_LANE_2 0x00020000
551 #define CSMI_CON_SFF_8484_LANE_3 0x00040000
552 #define CSMI_CON_SFF_8484_LANE_4 0x00080000
553
554 u8 connector[16];
555 u8 location;
556 #define CSMI_CON_INTERNAL 0x02
557 #define CSMI_CON_EXTERNAL 0x04
558 #define CSMI_CON_SWITCHABLE 0x08
559 #define CSMI_CON_AUTO 0x10
560
561 u8 reserved[15];
562};
563
564/* CSMI PHY class structures */

/* One 8b/10b character used in a phy-control test pattern: its value plus
 * flags describing disparity / control-character status. */
565struct atto_csmi_character {
566 u8 type_flags;
567 #define CSMI_CTF_POS_DISP 0x01
568 #define CSMI_CTF_NEG_DISP 0x02
569 #define CSMI_CTF_CTRL_CHAR 0x04
570
571 u8 value;
572};
573
/* One phy-control record: link type/rate plus transmitter (amplitude,
 * pre-emphasis, slew) and receiver (threshold, equalization) tuning and the
 * test-pattern selection used for phy testing. */
574struct atto_csmi_pc_ctrl {
575 u8 type;
576 #define CSMI_PC_TYPE_UNDEFINED 0x00
577 #define CSMI_PC_TYPE_SATA 0x01
578 #define CSMI_PC_TYPE_SAS 0x02
579 u8 rate;
580 u8 reserved[6];
581 u32 vendor_unique[8];
582 u32 tx_flags;
583 #define CSMI_PC_TXF_PREEMP_DIS 0x00000001
584
585 signed char tx_amplitude;
586 signed char tx_preemphasis;
587 signed char tx_slew_rate;
588 signed char tx_reserved[13];
589 u8 tx_vendor_unique[64];
590 u32 rx_flags;
591 #define CSMI_PC_RXF_EQ_DIS 0x00000001
592
593 signed char rx_threshold;
594 signed char rx_equalization_gain;
595 signed char rx_reserved[14];
596 u8 rx_vendor_unique[64];
597 u32 pattern_flags;
598 #define CSMI_PC_PATF_FIXED 0x00000001
599 #define CSMI_PC_PATF_DIS_SCR 0x00000002
600 #define CSMI_PC_PATF_DIS_ALIGN 0x00000004
601 #define CSMI_PC_PATF_DIS_SSC 0x00000008
602
603 u8 fixed_pattern;
604 #define CSMI_PC_FP_CJPAT 0x00000001
605 #define CSMI_PC_FP_ALIGN 0x00000002
606
607 u8 user_pattern_len;
608 u8 pattern_reserved[6];
609
610 struct atto_csmi_character user_pattern_buffer[16];
611};
612
/* CSMI phy-control request header followed by num_of_cntls control
 * records.  NOTE(review): control[1] is the legacy variable-length-array
 * idiom; it cannot be changed to a C99 flexible member without altering
 * sizeof() and hence the ioctl ABI. */
613struct atto_csmi_phy_ctrl {
614 u32 function;
615 #define CSMI_PC_FUNC_GET_SETUP 0x00000100
616
617 u8 phy_id;
618 u16 len_of_cntl;
619 u8 num_of_cntls;
620 u8 reserved[4];
621 u32 link_flags;
622 #define CSMI_PHY_ACTIVATE_CTRL 0x00000001
623 #define CSMI_PHY_UPD_SPINUP_RATE 0x00000002
624 #define CSMI_PHY_AUTO_COMWAKE 0x00000004
625
626 u8 spinup_rate;
627 u8 link_reserved[7];
628 u32 vendor_unique[8];
629
630 struct atto_csmi_pc_ctrl control[1];
631};
632
/* Union of every CSMI payload; atto_csmi.control_code selects which member
 * is live.  Sized by its largest member (conn_info[32]). */
633union atto_ioctl_csmi {
634 struct atto_csmi_get_driver_info drvr_info;
635 struct atto_csmi_get_cntlr_cfg cntlr_cfg;
636 struct atto_csmi_get_cntlr_sts cntlr_sts;
637 struct atto_csmi_fw_download fw_dwnld;
638 struct atto_csmi_get_raid_info raid_info;
639 struct atto_csmi_get_raid_cfg raid_cfg;
640 struct atto_csmi_get_phy_info get_phy_info;
641 struct atto_csmi_set_phy_info set_phy_info;
642 struct atto_csmi_get_link_errors link_errs;
643 struct atto_csmi_smp_passthru smp_pass_thru;
644 struct atto_csmi_ssp_passthru ssp_pass_thru;
645 struct atto_csmi_stp_passthru stp_pass_thru;
646 struct atto_csmi_task_mgmt tsk_mgt;
647 struct atto_csmi_get_sata_sig sata_sig;
648 struct atto_csmi_get_scsi_addr scsi_addr;
649 struct atto_csmi_get_dev_addr dev_addr;
650 struct atto_csmi_get_conn_info conn_info[32];
651 struct atto_csmi_phy_ctrl phy_ctrl;
652};
653
/* Top-level CSMI request: control_code selects the operation (and the live
 * member of data); status is filled in on completion. */
654struct atto_csmi {
655 u32 control_code;
656 u32 status;
657 union atto_ioctl_csmi data;
658};
659
/* Driver-module identification: opaque pointers to the adapter/pci/scsi-host
 * objects plus the host number and the adapter's world-wide address, which is
 * either an FC node/port name pair or a single SAS address (anonymous union). */
660struct atto_module_info {
661 void *adapter;
662 void *pci_dev;
663 void *scsi_host;
664 unsigned short host_no;
665 union {
666 struct {
667 u64 node_name;
668 u64 port_name;
669 };
670 u64 sas_addr;
671 };
672};
673
674#define ATTO_FUNC_GET_ADAP_INFO 0x00
675#define ATTO_VER_GET_ADAP_INFO0 0
676#define ATTO_VER_GET_ADAP_INFO ATTO_VER_GET_ADAP_INFO0

/* ATTO_FUNC_GET_ADAP_INFO payload: static adapter inventory — PCI identity
 * and link state, adapter model/flags, driver/firmware/flash revisions, and
 * capability masks.  Constants are interleaved with the fields they describe
 * (file-local convention). */
678struct __packed atto_hba_get_adapter_info {
679
680 struct {
681 u16 vendor_id;
682 u16 device_id;
683 u16 ss_vendor_id;
684 u16 ss_device_id;
685 u8 class_code[3];
686 u8 rev_id;
687 u8 bus_num;
688 u8 dev_num;
689 u8 func_num;
690 u8 link_width_max;
691 u8 link_width_curr;
692 #define ATTO_GAI_PCILW_UNKNOWN 0x00

694 u8 link_speed_max;
695 u8 link_speed_curr;
696 #define ATTO_GAI_PCILS_UNKNOWN 0x00
697 #define ATTO_GAI_PCILS_GEN1 0x01
698 #define ATTO_GAI_PCILS_GEN2 0x02
699 #define ATTO_GAI_PCILS_GEN3 0x03

701 u8 interrupt_mode;
702 #define ATTO_GAI_PCIIM_UNKNOWN 0x00
703 #define ATTO_GAI_PCIIM_LEGACY 0x01
704 #define ATTO_GAI_PCIIM_MSI 0x02
705 #define ATTO_GAI_PCIIM_MSIX 0x03

707 u8 msi_vector_cnt;
708 u8 reserved[19];
709 } pci;

711 u8 adap_type;
712 #define ATTO_GAI_AT_EPCIU320 0x00
713 #define ATTO_GAI_AT_ESASRAID 0x01
714 #define ATTO_GAI_AT_ESASRAID2 0x02
715 #define ATTO_GAI_AT_ESASHBA 0x03
716 #define ATTO_GAI_AT_ESASHBA2 0x04
717 #define ATTO_GAI_AT_CELERITY 0x05
718 #define ATTO_GAI_AT_CELERITY8 0x06
719 #define ATTO_GAI_AT_FASTFRAME 0x07
720 #define ATTO_GAI_AT_ESASHBA3 0x08
721 #define ATTO_GAI_AT_CELERITY16 0x09
722 #define ATTO_GAI_AT_TLSASHBA 0x0A
723 #define ATTO_GAI_AT_ESASHBA4 0x0B

725 u8 adap_flags;
726 #define ATTO_GAI_AF_DEGRADED 0x01
727 #define ATTO_GAI_AF_SPT_SUPP 0x02
728 #define ATTO_GAI_AF_DEVADDR_SUPP 0x04
729 #define ATTO_GAI_AF_PHYCTRL_SUPP 0x08
730 #define ATTO_GAI_AF_TEST_SUPP 0x10
731 #define ATTO_GAI_AF_DIAG_SUPP 0x20
732 #define ATTO_GAI_AF_VIRT_SES 0x40
733 #define ATTO_GAI_AF_CONN_CTRL 0x80

735 u8 num_ports;
736 u8 num_phys;
737 u8 drvr_rev_major;
738 u8 drvr_rev_minor;
739 u8 drvr_revsub_minor;
740 u8 drvr_rev_build;
741 char drvr_rev_ascii[16];
742 char drvr_name[32];
743 char firmware_rev[16];
744 char flash_rev[16];
745 char model_name_short[16];
746 char model_name[32];
747 u32 num_targets;
748 u32 num_targsper_bus;
749 u32 num_lunsper_targ;
750 u8 num_busses;
751 u8 num_connectors;
752 u8 adap_flags2;
753 #define ATTO_GAI_AF2_FCOE_SUPP 0x01
754 #define ATTO_GAI_AF2_NIC_SUPP 0x02
755 #define ATTO_GAI_AF2_LOCATE_SUPP 0x04
756 #define ATTO_GAI_AF2_ADAP_CTRL_SUPP 0x08
757 #define ATTO_GAI_AF2_DEV_INFO_SUPP 0x10
758 #define ATTO_GAI_AF2_NPIV_SUPP 0x20
759 #define ATTO_GAI_AF2_MP_SUPP 0x40

761 u8 num_temp_sensors;
762 u32 num_targets_backend;
763 u32 tunnel_flags;
764 #define ATTO_GAI_TF_MEM_RW 0x00000001
765 #define ATTO_GAI_TF_TRACE 0x00000002
766 #define ATTO_GAI_TF_SCSI_PASS_THRU 0x00000004
767 #define ATTO_GAI_TF_GET_DEV_ADDR 0x00000008
768 #define ATTO_GAI_TF_PHY_CTRL 0x00000010
769 #define ATTO_GAI_TF_CONN_CTRL 0x00000020
770 #define ATTO_GAI_TF_GET_DEV_INFO 0x00000040

772 u8 reserved3[0x138];
773};
774
775#define ATTO_FUNC_GET_ADAP_ADDR 0x01
776#define ATTO_VER_GET_ADAP_ADDR0 0
777#define ATTO_VER_GET_ADAP_ADDR ATTO_VER_GET_ADAP_ADDR0

/* ATTO_FUNC_GET_ADAP_ADDR payload: fetch one adapter address of the
 * requested type (port/node WWN, MAC, ...) for the given port; actual
 * length returned in addr_len. */
779struct __packed atto_hba_get_adapter_address {

781 u8 addr_type;
782 #define ATTO_GAA_AT_PORT 0x00
783 #define ATTO_GAA_AT_NODE 0x01
784 #define ATTO_GAA_AT_CURR_MAC 0x02
785 #define ATTO_GAA_AT_PERM_MAC 0x03
786 #define ATTO_GAA_AT_VNIC 0x04

788 u8 port_id;
789 u16 addr_len;
790 u8 address[256];
791};
792
793#define ATTO_FUNC_MEM_RW 0x02
794#define ATTO_VER_MEM_RW0 0
795#define ATTO_VER_MEM_RW ATTO_VER_MEM_RW0

/* ATTO_FUNC_MEM_RW payload: raw memory read/write; the union selects a PCI
 * BAR index or an I2C device depending on mem_type (values not visible here —
 * defined elsewhere in the driver). */
797struct __packed atto_hba_memory_read_write {
798 u8 mem_func;
799 u8 mem_type;
800 union {
801 u8 pci_index;
802 u8 i2c_dev;
803 };
804 u8 i2c_status;
805 u32 length;
806 u64 address;
807 u8 reserved[48];

809};
810
811#define ATTO_FUNC_TRACE 0x03
812#define ATTO_VER_TRACE0 0
813#define ATTO_VER_TRACE1 1
814#define ATTO_VER_TRACE ATTO_VER_TRACE1

/* ATTO_FUNC_TRACE payload: control and upload driver or firmware-coredump
 * trace buffers; uploads are windowed via current_offset/total_length. */
816struct __packed atto_hba_trace {
817 u8 trace_func;
818 #define ATTO_TRC_TF_GET_INFO 0x00
819 #define ATTO_TRC_TF_ENABLE 0x01
820 #define ATTO_TRC_TF_DISABLE 0x02
821 #define ATTO_TRC_TF_SET_MASK 0x03
822 #define ATTO_TRC_TF_UPLOAD 0x04
823 #define ATTO_TRC_TF_RESET 0x05

825 u8 trace_type;
826 #define ATTO_TRC_TT_DRIVER 0x00
827 #define ATTO_TRC_TT_FWCOREDUMP 0x01

829 u8 reserved[2];
830 u32 current_offset;
831 u32 total_length;
832 u32 trace_mask;
833 u8 reserved2[48];
834};
835
836#define ATTO_FUNC_SCSI_PASS_THRU 0x04
837#define ATTO_VER_SCSI_PASS_THRU0 0
838#define ATTO_VER_SCSI_PASS_THRU ATTO_VER_SCSI_PASS_THRU0

/* ATTO_FUNC_SCSI_PASS_THRU payload: send an arbitrary CDB (up to 32 bytes)
 * to target_id/lun; req_status reports the transport outcome, scsi_status and
 * sense_data the device's response. */
840struct __packed atto_hba_scsi_pass_thru {
841 u8 cdb[32];
842 u8 cdb_length;
843 u8 req_status;
844 #define ATTO_SPT_RS_SUCCESS 0x00
845 #define ATTO_SPT_RS_FAILED 0x01
846 #define ATTO_SPT_RS_OVERRUN 0x02
847 #define ATTO_SPT_RS_UNDERRUN 0x03
848 #define ATTO_SPT_RS_NO_DEVICE 0x04
849 #define ATTO_SPT_RS_NO_LUN 0x05
850 #define ATTO_SPT_RS_TIMEOUT 0x06
851 #define ATTO_SPT_RS_BUS_RESET 0x07
852 #define ATTO_SPT_RS_ABORTED 0x08
853 #define ATTO_SPT_RS_BUSY 0x09
854 #define ATTO_SPT_RS_DEGRADED 0x0A

856 u8 scsi_status;
857 u8 sense_length;
858 u32 flags;
859 #define ATTO_SPTF_DATA_IN 0x00000001
860 #define ATTO_SPTF_DATA_OUT 0x00000002
861 #define ATTO_SPTF_SIMPLE_Q 0x00000004
862 #define ATTO_SPTF_HEAD_OF_Q 0x00000008
863 #define ATTO_SPTF_ORDERED_Q 0x00000010

865 u32 timeout;
866 u32 target_id;
867 u8 lun[8];
868 u32 residual_length;
869 u8 sense_data[0xFC];
870 u8 reserved[0x28];
871};
872
873#define ATTO_FUNC_GET_DEV_ADDR 0x05
874#define ATTO_VER_GET_DEV_ADDR0 0
875#define ATTO_VER_GET_DEV_ADDR ATTO_VER_GET_DEV_ADDR0

/* ATTO_FUNC_GET_DEV_ADDR payload: fetch one address of the requested type
 * for a target device; actual length returned in addr_len. */
877struct __packed atto_hba_get_device_address {
878 u8 addr_type;
879 #define ATTO_GDA_AT_PORT 0x00
880 #define ATTO_GDA_AT_NODE 0x01
881 #define ATTO_GDA_AT_MAC 0x02
882 #define ATTO_GDA_AT_PORTID 0x03
883 #define ATTO_GDA_AT_UNIQUE 0x04

885 u8 reserved;
886 u16 addr_len;
887 u32 target_id;
888 u8 address[256];
889};
890
891/* The following functions are supported by firmware but do not have any
892 * associated driver structures
893 */
894#define ATTO_FUNC_PHY_CTRL 0x06
895#define ATTO_FUNC_CONN_CTRL 0x0C
896#define ATTO_FUNC_ADAP_CTRL 0x0E
897#define ATTO_VER_ADAP_CTRL0 0
898#define ATTO_VER_ADAP_CTRL ATTO_VER_ADAP_CTRL0

/* ATTO_FUNC_ADAP_CTRL payload: reset the adapter, query its state, or read
 * one temperature sensor (value and limits; units not specified here —
 * presumably degrees C, confirm against firmware docs). */
900struct __packed atto_hba_adap_ctrl {
901 u8 adap_func;
902 #define ATTO_AC_AF_HARD_RST 0x00
903 #define ATTO_AC_AF_GET_STATE 0x01
904 #define ATTO_AC_AF_GET_TEMP 0x02

906 u8 adap_state;
907 #define ATTO_AC_AS_UNKNOWN 0x00
908 #define ATTO_AC_AS_OK 0x01
909 #define ATTO_AC_AS_RST_SCHED 0x02
910 #define ATTO_AC_AS_RST_IN_PROG 0x03
911 #define ATTO_AC_AS_RST_DISC 0x04
912 #define ATTO_AC_AS_DEGRADED 0x05
913 #define ATTO_AC_AS_DISABLED 0x06
914 #define ATTO_AC_AS_TEMP 0x07

916 u8 reserved[2];

918 union {
919 struct {
920 u8 temp_sensor;
921 u8 temp_state;

923 #define ATTO_AC_TS_UNSUPP 0x00
924 #define ATTO_AC_TS_UNKNOWN 0x01
925 #define ATTO_AC_TS_INIT_FAILED 0x02
926 #define ATTO_AC_TS_NORMAL 0x03
927 #define ATTO_AC_TS_OUT_OF_RANGE 0x04
928 #define ATTO_AC_TS_FAULT 0x05

930 signed short temp_value;
931 signed short temp_lower_lim;
932 signed short temp_upper_lim;
933 char temp_desc[32];
934 u8 reserved2[20];
935 };
936 };
937};
938
939#define ATTO_FUNC_GET_DEV_INFO 0x0F
940#define ATTO_VER_GET_DEV_INFO0 0
941#define ATTO_VER_GET_DEV_INFO ATTO_VER_GET_DEV_INFO0

/* SAS-specific device information: parent phy IDs (supports wide ports up
 * to 16 phys), topology position (direct / behind expander / port
 * multiplier), link rate, and SES slot identity. */
943struct __packed atto_hba_sas_device_info {

945 #define ATTO_SDI_MAX_PHYS_WIDE_PORT 16

947 u8 phy_id[ATTO_SDI_MAX_PHYS_WIDE_PORT]; /* IDs of parent exp/adapt */
948 #define ATTO_SDI_PHY_ID_INV ATTO_SAS_PHY_ID_INV
949 u32 exp_target_id;
950 u32 sas_port_mask;
951 u8 sas_level;
952 #define ATTO_SDI_SAS_LVL_INV 0xFF

954 u8 slot_num;
955 #define ATTO_SDI_SLOT_NUM_INV ATTO_SLOT_NUM_INV

957 u8 dev_type;
958 #define ATTO_SDI_DT_END_DEVICE 0
959 #define ATTO_SDI_DT_EXPANDER 1
960 #define ATTO_SDI_DT_PORT_MULT 2

962 u8 ini_flags;
963 u8 tgt_flags;
964 u8 link_rate; /* SMP_RATE_XXX */
965 u8 loc_flags;
966 #define ATTO_SDI_LF_DIRECT 0x01
967 #define ATTO_SDI_LF_EXPANDER 0x02
968 #define ATTO_SDI_LF_PORT_MULT 0x04
969 u8 pm_port;
970 u8 reserved[0x60];
971};
972
/* Per-transport device info; only the SAS variant exists today. */
973union atto_hba_device_info {
974 struct atto_hba_sas_device_info sas_dev_info;
975};
976
/* ATTO_FUNC_GET_DEV_INFO payload: request transport-specific info for a
 * target; info_type tags which union member is valid on return. */
977struct __packed atto_hba_get_device_info {
978 u32 target_id;
979 u8 info_type;
980 #define ATTO_GDI_IT_UNKNOWN 0x00
981 #define ATTO_GDI_IT_SAS 0x01
982 #define ATTO_GDI_IT_FC 0x02
983 #define ATTO_GDI_IT_FCOE 0x03

985 u8 reserved[11];
986 union atto_hba_device_info dev_info;
987};
988
/* Generic HBA ioctl envelope: function selects the ATTO_FUNC_XXX operation
 * (and the live member of data), version its structure revision; status is
 * filled in on completion with one of the ATTO_STS_XXX codes below. */
989struct atto_ioctl {
990 u8 version;
991 u8 function; /* ATTO_FUNC_XXX */
992 u8 status;
993#define ATTO_STS_SUCCESS 0x00
994#define ATTO_STS_FAILED 0x01
995#define ATTO_STS_INV_VERSION 0x02
996#define ATTO_STS_OUT_OF_RSRC 0x03
997#define ATTO_STS_INV_FUNC 0x04
998#define ATTO_STS_UNSUPPORTED 0x05
999#define ATTO_STS_INV_ADAPTER 0x06
1000#define ATTO_STS_INV_DRVR_VER 0x07
1001#define ATTO_STS_INV_PARAM 0x08
1002#define ATTO_STS_TIMEOUT 0x09
1003#define ATTO_STS_NOT_APPL 0x0A
1004#define ATTO_STS_DEGRADED 0x0B

1006 u8 flags;
1007 #define HBAF_TUNNEL 0x01

1009 u32 data_length;
1010 u8 reserved2[56];

1012 union {
1013 u8 byte[1];
1014 struct atto_hba_get_adapter_info get_adap_info;
1015 struct atto_hba_get_adapter_address get_adap_addr;
1016 struct atto_hba_scsi_pass_thru scsi_pass_thru;
1017 struct atto_hba_get_device_address get_dev_addr;
1018 struct atto_hba_adap_ctrl adap_ctrl;
1019 struct atto_hba_get_device_info get_dev_info;
1020 struct atto_hba_trace trace;
1021 } data;

1023};
1024
/* VDA_FUNC_SCSI command block: a 16-byte CDB addressed to a VDA target_id,
 * with SCSI status and sense returned in place. */
1025struct __packed atto_ioctl_vda_scsi_cmd {

1027 #define ATTO_VDA_SCSI_VER0 0
1028 #define ATTO_VDA_SCSI_VER ATTO_VDA_SCSI_VER0

1030 u8 cdb[16];
1031 u32 flags;
1032 u32 data_length;
1033 u32 residual_length;
1034 u16 target_id;
1035 u8 sense_len;
1036 u8 scsi_stat;
1037 u8 reserved[8];
1038 u8 sense_data[80];
1039};
1040
/* VDA_FUNC_FLASH command block: flash access at flash_addr for data_length
 * bytes; sub_func is a VDA_FLASH_XXX code.  The union returns either flash
 * geometry (info) or a flash-file descriptor (file). */
1041struct __packed atto_ioctl_vda_flash_cmd {

1043 #define ATTO_VDA_FLASH_VER0 0
1044 #define ATTO_VDA_FLASH_VER ATTO_VDA_FLASH_VER0

1046 u32 flash_addr;
1047 u32 data_length;
1048 u8 sub_func;
1049 u8 reserved[15];

1051 union {
1052 struct {
1053 u32 flash_size;
1054 u32 page_size;
1055 u8 prod_info[32];
1056 } info;

1058 struct {
1059 char file_name[16]; /* 8.3 fname, NULL term, wc=* */
1060 u32 file_size;
1061 } file;
1062 } data;

1064};
1065
/* VDA_FUNC_DIAG command block: diagnostic access to a firmware-local
 * address range (semantics of sub_func/flags defined by firmware). */
1066struct __packed atto_ioctl_vda_diag_cmd {

1068 #define ATTO_VDA_DIAG_VER0 0
1069 #define ATTO_VDA_DIAG_VER ATTO_VDA_DIAG_VER0

1071 u64 local_addr;
1072 u32 data_length;
1073 u8 sub_func;
1074 u8 flags;
1075 u8 reserved[3];
1076};
1077
/* VDA_FUNC_CLI command block: command/response text travels in the outer
 * atto_ioctl_vda data area; only its length is carried here. */
1078struct __packed atto_ioctl_vda_cli_cmd {

1080 #define ATTO_VDA_CLI_VER0 0
1081 #define ATTO_VDA_CLI_VER ATTO_VDA_CLI_VER0

1083 u32 cmd_rsp_len;
1084};
1085
/* VDA_FUNC_IOCTL SMP command block: dest is presumably the destination SAS
 * address (64-bit) — confirm against firmware interface docs. */
1086struct __packed atto_ioctl_vda_smp_cmd {

1088 #define ATTO_VDA_SMP_VER0 0
1089 #define ATTO_VDA_SMP_VER ATTO_VDA_SMP_VER0

1091 u64 dest;
1092 u32 cmd_rsp_len;
1093};
1094
/* VDA_FUNC_CFG command block: configuration operation selected by cfg_func;
 * the 112-byte payload area overlays the init structure. */
1095struct __packed atto_ioctl_vda_cfg_cmd {

1097 #define ATTO_VDA_CFG_VER0 0
1098 #define ATTO_VDA_CFG_VER ATTO_VDA_CFG_VER0

1100 u32 data_length;
1101 u8 cfg_func;
1102 u8 reserved[11];

1104 union {
1105 u8 bytes[112];
1106 struct atto_vda_cfg_init init;
1107 } data;

1109};
1110
/* VDA_FUNC_MGT command block: RAID-management operation (mgt_func) against
 * dev_index; the 112-byte union overlays the per-operation info structure.
 * scan_generation guards against acting on stale topology (see RS_SCAN_GEN). */
1111struct __packed atto_ioctl_vda_mgt_cmd {

1113 #define ATTO_VDA_MGT_VER0 0
1114 #define ATTO_VDA_MGT_VER ATTO_VDA_MGT_VER0

1116 u8 mgt_func;
1117 u8 scan_generation;
1118 u16 dev_index;
1119 u32 data_length;
1120 u8 reserved[8];
1121 union {
1122 u8 bytes[112];
1123 struct atto_vda_devinfo dev_info;
1124 struct atto_vda_grp_info grp_info;
1125 struct atto_vdapart_info part_info;
1126 struct atto_vda_dh_info dh_info;
1127 struct atto_vda_metrics_info metrics_info;
1128 struct atto_vda_schedule_info sched_info;
1129 struct atto_vda_n_vcache_info nvcache_info;
1130 struct atto_vda_buzzer_info buzzer_info;
1131 struct atto_vda_adapter_info adapter_info;
1132 struct atto_vda_temp_info temp_info;
1133 struct atto_vda_fan_info fan_info;
1134 } data;
1135};
1136
/* VDA_FUNC_GSV command block: returns supported version codes; version_info
 * is a trailing variable-length array (rsp_len entries) — the [1] sizing is
 * intentional legacy ABI, do not convert to a flexible member. */
1137struct __packed atto_ioctl_vda_gsv_cmd {

1139 #define ATTO_VDA_GSV_VER0 0
1140 #define ATTO_VDA_GSV_VER ATTO_VDA_GSV_VER0

1142 u8 rsp_len;
1143 u8 reserved[7];
1144 u8 version_info[1];
1145 #define ATTO_VDA_VER_UNSUPPORTED 0xFF

1147};
1148
/* Top-level VDA ioctl envelope: function selects the VDA_FUNC_XXXX command
 * (and the live cmd member); status is the driver-level ATTO_STS_XXX result
 * and vda_status the firmware RS_XXX result, valid only when status is
 * ATTO_STS_SUCCESS.  data is a variable-length trailing area ([1] legacy
 * sizing — intentional ABI). */
1149struct __packed atto_ioctl_vda {
1150 u8 version;
1151 u8 function; /* VDA_FUNC_XXXX */
1152 u8 status; /* ATTO_STS_XXX */
1153 u8 vda_status; /* RS_XXX (if status == ATTO_STS_SUCCESS) */
1154 u32 data_length;
1155 u8 reserved[8];

1157 union {
1158 struct atto_ioctl_vda_scsi_cmd scsi;
1159 struct atto_ioctl_vda_flash_cmd flash;
1160 struct atto_ioctl_vda_diag_cmd diag;
1161 struct atto_ioctl_vda_cli_cmd cli;
1162 struct atto_ioctl_vda_smp_cmd smp;
1163 struct atto_ioctl_vda_cfg_cmd cfg;
1164 struct atto_ioctl_vda_mgt_cmd mgt;
1165 struct atto_ioctl_vda_gsv_cmd gsv;
1166 u8 cmd_info[256];
1167 } cmd;

1169 union {
1170 u8 data[1];
1171 struct atto_vda_devinfo2 dev_info2;
1172 } data;

1174};
1175
/* SMP ioctl envelope: discover SMP/target devices or send raw SMP frames,
 * addressed either by index/target or directly by SAS address (the _DIRECT
 * function variants).  Request/response bytes follow in the trailing data
 * union ([1] legacy sizing — intentional ABI). */
1176struct __packed atto_ioctl_smp {
1177 u8 version;
1178 #define ATTO_SMP_VERSION0 0
1179 #define ATTO_SMP_VERSION1 1
1180 #define ATTO_SMP_VERSION2 2
1181 #define ATTO_SMP_VERSION ATTO_SMP_VERSION2

1183 u8 function;
1184#define ATTO_SMP_FUNC_DISC_SMP 0x00
1185#define ATTO_SMP_FUNC_DISC_TARG 0x01
1186#define ATTO_SMP_FUNC_SEND_CMD 0x02
1187#define ATTO_SMP_FUNC_DISC_TARG_DIRECT 0x03
1188#define ATTO_SMP_FUNC_SEND_CMD_DIRECT 0x04
1189#define ATTO_SMP_FUNC_DISC_SMP_DIRECT 0x05

1191 u8 status; /* ATTO_STS_XXX */
1192 u8 smp_status; /* if status == ATTO_STS_SUCCESS */
1193 #define ATTO_SMP_STS_SUCCESS 0x00
1194 #define ATTO_SMP_STS_FAILURE 0x01
1195 #define ATTO_SMP_STS_RESCAN 0x02
1196 #define ATTO_SMP_STS_NOT_FOUND 0x03

1198 u16 target_id;
1199 u8 phy_id;
1200 u8 dev_index;
1201 u64 smp_sas_addr;
1202 u64 targ_sas_addr;
1203 u32 req_length;
1204 u32 rsp_length;
1205 u8 flags;
1206 #define ATTO_SMPF_ROOT_EXP 0x01 /* expander direct attached */

1208 u8 reserved[31];

1210 union {
1211 u8 byte[1];
1212 u32 dword[1];
1213 } data;

1215};
1216
/* Outermost ioctl structure for the esas2r driver: a common header followed
 * by a union of every payload family; the EXPRESS_IOCTL_XXX code (below)
 * chosen by the caller determines which member is live. */
1217struct __packed atto_express_ioctl {
1218 struct atto_express_ioctl_header header;

1220 union {
1221 struct atto_firmware_rw_request fwrw;
1222 struct atto_param_rw_request prw;
1223 struct atto_channel_list chanlist;
1224 struct atto_channel_info chaninfo;
1225 struct atto_ioctl ioctl_hba;
1226 struct atto_module_info modinfo;
1227 struct atto_ioctl_vda ioctl_vda;
1228 struct atto_ioctl_smp ioctl_smp;
1229 struct atto_csmi csmi;

1231 } data;
1232};
1233
/* ioctl command codes, a contiguous range 0x4500..0x450F; the trailing
 * comment on each names the atto_express_ioctl union member it uses.
 * EXPRESS_IOCTL_MIN/MAX bound the range for dispatch validation. */
1234/* The struct associated with the code is listed after the definition */
1235#define EXPRESS_IOCTL_MIN 0x4500
1236#define EXPRESS_IOCTL_RW_FIRMWARE 0x4500 /* FIRMWARERW */
1237#define EXPRESS_IOCTL_READ_PARAMS 0x4501 /* PARAMRW */
1238#define EXPRESS_IOCTL_WRITE_PARAMS 0x4502 /* PARAMRW */
1239#define EXPRESS_IOCTL_FC_API 0x4503 /* internal */
1240#define EXPRESS_IOCTL_GET_CHANNELS 0x4504 /* CHANNELLIST */
1241#define EXPRESS_IOCTL_CHAN_INFO 0x4505 /* CHANNELINFO */
1242#define EXPRESS_IOCTL_DEFAULT_PARAMS 0x4506 /* PARAMRW */
1243#define EXPRESS_ADDR_MEMORY 0x4507 /* MEMADDR */
1244#define EXPRESS_RW_MEMORY 0x4508 /* MEMRW */
1245#define EXPRESS_TSDK_DUMP 0x4509 /* TSDKDUMP */
1246#define EXPRESS_IOCTL_SMP 0x450A /* IOCTL_SMP */
1247#define EXPRESS_CSMI 0x450B /* CSMI */
1248#define EXPRESS_IOCTL_HBA 0x450C /* IOCTL_HBA */
1249#define EXPRESS_IOCTL_VDA 0x450D /* IOCTL_VDA */
1250#define EXPRESS_IOCTL_GET_ID 0x450E /* GET_ID */
1251#define EXPRESS_IOCTL_GET_MOD_INFO 0x450F /* MODULE_INFO */
1252#define EXPRESS_IOCTL_MAX 0x450F

1254#endif
diff --git a/drivers/scsi/esas2r/atvda.h b/drivers/scsi/esas2r/atvda.h
new file mode 100644
index 000000000000..5fc1f991d24e
--- /dev/null
+++ b/drivers/scsi/esas2r/atvda.h
@@ -0,0 +1,1319 @@
1/* linux/drivers/scsi/esas2r/atvda.h
2 * ATTO VDA interface definitions
3 *
4 * Copyright (c) 2001-2013 ATTO Technology, Inc.
5 * (mailto:linuxdrivers@attotech.com)
6 */
7/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
8/*
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
41 */
42/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
43
44
45#ifndef ATVDA_H
46#define ATVDA_H
47
/* VDA device address: device and HBA port identifiers (64-bit each —
 * presumably SAS addresses/WWNs, confirm vs VDA docs), LUN, flags and the
 * negotiated link speed.  Padded to an even size with pad[1]. */
48struct __packed atto_dev_addr {
49 u64 dev_port;
50 u64 hba_port;
51 u8 lun;
52 u8 flags;
53 #define VDA_DEVADDRF_SATA 0x01
54 #define VDA_DEVADDRF_SSD 0x02
55 u8 link_speed; /* VDALINKSPEED_xxx */
56 u8 pad[1];
57};
58
59/* dev_addr2 was added for 64-bit alignment */
60
/* Same fields as atto_dev_addr but with pad[5] so the structure size is a
 * multiple of 8 bytes (64-bit alignment for arrays of these, e.g.
 * atto_vda_devinfo2.dev_addr_list). */
61struct __packed atto_dev_addr2 {
62 u64 dev_port;
63 u64 hba_port;
64 u8 lun;
65 u8 flags;
66 u8 link_speed;
67 u8 pad[5];
68};
69
/* VDA scatter/gather element: byte length + 64-bit DMA address. */
70struct __packed atto_vda_sge {
71 u32 length;
72 u64 address;
73};
74
75
76/* VDA request function codes */

/* These select both the firmware command class and, for the ioctl path,
 * the live member of atto_ioctl_vda.cmd. */
78#define VDA_FUNC_SCSI 0x00
79#define VDA_FUNC_FLASH 0x01
80#define VDA_FUNC_DIAG 0x02
81#define VDA_FUNC_AE 0x03
82#define VDA_FUNC_CLI 0x04
83#define VDA_FUNC_IOCTL 0x05
84#define VDA_FUNC_CFG 0x06
85#define VDA_FUNC_MGT 0x07
86#define VDA_FUNC_GSV 0x08
87
88
89/* VDA request status values. for host driver considerations, values for
90 * SCSI requests start at zero. other requests may use these values as well. */

/* Layout of the RS_ code space: 0x00-0x10 generic/SCSI transport results,
 * 0x80+ management (RS_MGT_BASE), 0xB0+ flash (RS_FLS_BASE), 0xC0+
 * configuration (RS_CFG_BASE), and 0xFB-0xFF driver-internal states. */
92#define RS_SUCCESS 0x00 /*! successful completion */
93#define RS_INV_FUNC 0x01 /*! invalid command function */
94#define RS_BUSY 0x02 /*! insufficient resources */
95#define RS_SEL 0x03 /*! no target at target_id */
96#define RS_NO_LUN 0x04 /*! invalid LUN */
97#define RS_TIMEOUT 0x05 /*! request timeout */
98#define RS_OVERRUN 0x06 /*! data overrun */
99#define RS_UNDERRUN 0x07 /*! data underrun */
100#define RS_SCSI_ERROR 0x08 /*! SCSI error occurred */
101#define RS_ABORTED 0x0A /*! command aborted */
102#define RS_RESID_MISM 0x0B /*! residual length incorrect */
103#define RS_TM_FAILED 0x0C /*! task management failed */
104#define RS_RESET 0x0D /*! aborted due to bus reset */
105#define RS_ERR_DMA_SG 0x0E /*! error reading SG list */
106#define RS_ERR_DMA_DATA 0x0F /*! error transferring data */
107#define RS_UNSUPPORTED 0x10 /*! unsupported request */
108#define RS_SEL2 0x70 /*! internal generated RS_SEL */
109#define RS_VDA_BASE 0x80 /*! base of VDA-specific errors */
110#define RS_MGT_BASE 0x80 /*! base of VDA management errors */
111#define RS_SCAN_FAIL (RS_MGT_BASE + 0x00)
112#define RS_DEV_INVALID (RS_MGT_BASE + 0x01)
113#define RS_DEV_ASSIGNED (RS_MGT_BASE + 0x02)
114#define RS_DEV_REMOVE (RS_MGT_BASE + 0x03)
115#define RS_DEV_LOST (RS_MGT_BASE + 0x04)
116#define RS_SCAN_GEN (RS_MGT_BASE + 0x05)
117#define RS_GRP_INVALID (RS_MGT_BASE + 0x08)
118#define RS_GRP_EXISTS (RS_MGT_BASE + 0x09)
119#define RS_GRP_LIMIT (RS_MGT_BASE + 0x0A)
120#define RS_GRP_INTLV (RS_MGT_BASE + 0x0B)
121#define RS_GRP_SPAN (RS_MGT_BASE + 0x0C)
122#define RS_GRP_TYPE (RS_MGT_BASE + 0x0D)
123#define RS_GRP_MEMBERS (RS_MGT_BASE + 0x0E)
124#define RS_GRP_COMMIT (RS_MGT_BASE + 0x0F)
125#define RS_GRP_REBUILD (RS_MGT_BASE + 0x10)
126#define RS_GRP_REBUILD_TYPE (RS_MGT_BASE + 0x11)
127#define RS_GRP_BLOCK_SIZE (RS_MGT_BASE + 0x12)
128#define RS_CFG_SAVE (RS_MGT_BASE + 0x14)
129#define RS_PART_LAST (RS_MGT_BASE + 0x18)
130#define RS_ELEM_INVALID (RS_MGT_BASE + 0x19)
131#define RS_PART_MAPPED (RS_MGT_BASE + 0x1A)
132#define RS_PART_TARGET (RS_MGT_BASE + 0x1B)
133#define RS_PART_LUN (RS_MGT_BASE + 0x1C)
134#define RS_PART_DUP (RS_MGT_BASE + 0x1D)
135#define RS_PART_NOMAP (RS_MGT_BASE + 0x1E)
136#define RS_PART_MAX (RS_MGT_BASE + 0x1F)
137#define RS_PART_CAP (RS_MGT_BASE + 0x20)
138#define RS_PART_STATE (RS_MGT_BASE + 0x21)
139#define RS_TEST_IN_PROG (RS_MGT_BASE + 0x22)
140#define RS_METRICS_ERROR (RS_MGT_BASE + 0x23)
141#define RS_HS_ERROR (RS_MGT_BASE + 0x24)
142#define RS_NO_METRICS_TEST (RS_MGT_BASE + 0x25)
143#define RS_BAD_PARAM (RS_MGT_BASE + 0x26)
144#define RS_GRP_MEMBER_SIZE (RS_MGT_BASE + 0x27)
145#define RS_FLS_BASE 0xB0 /*! base of VDA errors */
146#define RS_FLS_ERR_AREA (RS_FLS_BASE + 0x00)
147#define RS_FLS_ERR_BUSY (RS_FLS_BASE + 0x01)
148#define RS_FLS_ERR_RANGE (RS_FLS_BASE + 0x02)
149#define RS_FLS_ERR_BEGIN (RS_FLS_BASE + 0x03)
150#define RS_FLS_ERR_CHECK (RS_FLS_BASE + 0x04)
151#define RS_FLS_ERR_FAIL (RS_FLS_BASE + 0x05)
152#define RS_FLS_ERR_RSRC (RS_FLS_BASE + 0x06)
153#define RS_FLS_ERR_NOFILE (RS_FLS_BASE + 0x07)
154#define RS_FLS_ERR_FSIZE (RS_FLS_BASE + 0x08)
155#define RS_CFG_BASE 0xC0 /*! base of VDA configuration errors */
156#define RS_CFG_ERR_BUSY (RS_CFG_BASE + 0)
157#define RS_CFG_ERR_SGE (RS_CFG_BASE + 1)
158#define RS_CFG_ERR_DATE (RS_CFG_BASE + 2)
159#define RS_CFG_ERR_TIME (RS_CFG_BASE + 3)
160#define RS_DEGRADED 0xFB /*! degraded mode */
161#define RS_CLI_INTERNAL 0xFC /*! VDA CLI internal error */
162#define RS_VDA_INTERNAL 0xFD /*! catch-all */
163#define RS_PENDING 0xFE /*! pending, not started */
164#define RS_STARTED 0xFF /*! started */
165
166
167/* flash request subfunctions. these are used in both the IOCTL and the
168 * driver-firmware interface (VDA_FUNC_FLASH). */

/* Raw-region ops (BEGINW/READ/WRITE/COMMIT/CANCEL/INFO) plus flash-file
 * variants (FREAD/FWRITE/FINFO) matching atto_ioctl_vda_flash_cmd.data. */
170#define VDA_FLASH_BEGINW 0x00
171#define VDA_FLASH_READ 0x01
172#define VDA_FLASH_WRITE 0x02
173#define VDA_FLASH_COMMIT 0x03
174#define VDA_FLASH_CANCEL 0x04
175#define VDA_FLASH_INFO 0x05
176#define VDA_FLASH_FREAD 0x06
177#define VDA_FLASH_FWRITE 0x07
178#define VDA_FLASH_FINFO 0x08
179
180
181/* IOCTL request subfunctions. these identify the payload type for
182 * VDA_FUNC_IOCTL.
183 */

185#define VDA_IOCTL_HBA 0x00
186#define VDA_IOCTL_CSMI 0x01
187#define VDA_IOCTL_SMP 0x02
188
/* VDA per-device info (management-path structure, limited to the 0x70-byte
 * payload of atto_vda_mgt_req — see atto_vda_devinfo2 for the extended
 * replacement).  Two anonymous unions overlay request fields (op_ctrl,
 * features_mask) on top of the corresponding response fields (dev_status,
 * target_id). */
189struct __packed atto_vda_devinfo {
190 struct atto_dev_addr dev_addr;
191 u8 vendor_id[8];
192 u8 product_id[16];
193 u8 revision[4];
194 u64 capacity;
195 u32 block_size;
196 u8 dev_type;

198 union {
199 u8 dev_status;
200 #define VDADEVSTAT_INVALID 0x00
201 #define VDADEVSTAT_CORRUPT VDADEVSTAT_INVALID
202 #define VDADEVSTAT_ASSIGNED 0x01
203 #define VDADEVSTAT_SPARE 0x02
204 #define VDADEVSTAT_UNAVAIL 0x03
205 #define VDADEVSTAT_PT_MAINT 0x04
206 #define VDADEVSTAT_LCLSPARE 0x05
207 #define VDADEVSTAT_UNUSEABLE 0x06
208 #define VDADEVSTAT_AVAIL 0xFF

210 u8 op_ctrl;
211 #define VDA_DEV_OP_CTRL_START 0x01
212 #define VDA_DEV_OP_CTRL_HALT 0x02
213 #define VDA_DEV_OP_CTRL_RESUME 0x03
214 #define VDA_DEV_OP_CTRL_CANCEL 0x04
215 };

217 u8 member_state;
218 #define VDAMBRSTATE_ONLINE 0x00
219 #define VDAMBRSTATE_DEGRADED 0x01
220 #define VDAMBRSTATE_UNAVAIL 0x02
221 #define VDAMBRSTATE_FAULTED 0x03
222 #define VDAMBRSTATE_MISREAD 0x04
223 #define VDAMBRSTATE_INCOMPAT 0x05

225 u8 operation;
226 #define VDAOP_NONE 0x00
227 #define VDAOP_REBUILD 0x01
228 #define VDAOP_ERASE 0x02
229 #define VDAOP_PATTERN 0x03
230 #define VDAOP_CONVERSION 0x04
231 #define VDAOP_FULL_INIT 0x05
232 #define VDAOP_QUICK_INIT 0x06
233 #define VDAOP_SECT_SCAN 0x07
234 #define VDAOP_SECT_SCAN_PARITY 0x08
235 #define VDAOP_SECT_SCAN_PARITY_FIX 0x09
236 #define VDAOP_RECOV_REBUILD 0x0A

238 u8 op_status;
239 #define VDAOPSTAT_OK 0x00
240 #define VDAOPSTAT_FAULTED 0x01
241 #define VDAOPSTAT_HALTED 0x02
242 #define VDAOPSTAT_INT 0x03

244 u8 progress; /* 0 - 100% */
245 u16 ses_dev_index;
246 #define VDASESDI_INVALID 0xFFFF

248 u8 serial_no[32];

250 union {
251 u16 target_id;
252 #define VDATGTID_INVALID 0xFFFF

254 u16 features_mask;
255 };

257 u16 lun;
258 u16 features;
259 #define VDADEVFEAT_ENC_SERV 0x0001
260 #define VDADEVFEAT_IDENT 0x0002
261 #define VDADEVFEAT_DH_SUPP 0x0004
262 #define VDADEVFEAT_PHYS_ID 0x0008

264 u8 ses_element_id;
265 u8 link_speed;
266 #define VDALINKSPEED_UNKNOWN 0x00
267 #define VDALINKSPEED_1GB 0x01
268 #define VDALINKSPEED_1_5GB 0x02
269 #define VDALINKSPEED_2GB 0x03
270 #define VDALINKSPEED_3GB 0x04
271 #define VDALINKSPEED_4GB 0x05
272 #define VDALINKSPEED_6GB 0x06
273 #define VDALINKSPEED_8GB 0x07

275 u16 phys_target_id;
276 u8 reserved[2];
277};
278
279
280/*! struct atto_vda_devinfo2 is a replacement for atto_vda_devinfo. it
281 * extends beyond the 0x70 bytes allowed in atto_vda_mgmt_req; therefore,
282 * the entire structure is DMaed between the firmware and host buffer and
283 * the data will always be in little endian format.
284 */

/* Leading fields mirror atto_vda_devinfo (flattened — no request-side union
 * overlays for dev_status); extended fields follow the version byte, with
 * grp_name added in v2 and dev_addr_list added in v3. */
286struct __packed atto_vda_devinfo2 {
287 struct atto_dev_addr dev_addr;
288 u8 vendor_id[8];
289 u8 product_id[16];
290 u8 revision[4];
291 u64 capacity;
292 u32 block_size;
293 u8 dev_type;
294 u8 dev_status;
295 u8 member_state;
296 u8 operation;
297 u8 op_status;
298 u8 progress;
299 u16 ses_dev_index;
300 u8 serial_no[32];
301 union {
302 u16 target_id;
303 u16 features_mask;
304 };

306 u16 lun;
307 u16 features;
308 u8 ses_element_id;
309 u8 link_speed;
310 u16 phys_target_id;
311 u8 reserved[2];

313/* This is where fields specific to struct atto_vda_devinfo2 begin. Note
314 * that the structure version started at one so applications that unionize this
315 * structure with atto_vda_dev_info can differentiate them if desired.
316 */

318 u8 version;
319 #define VDADEVINFO_VERSION0 0x00
320 #define VDADEVINFO_VERSION1 0x01
321 #define VDADEVINFO_VERSION2 0x02
322 #define VDADEVINFO_VERSION3 0x03
323 #define VDADEVINFO_VERSION VDADEVINFO_VERSION3

325 u8 reserved2[3];

327 /* sector scanning fields */

329 u32 ss_curr_errors;
330 u64 ss_curr_scanned;
331 u32 ss_curr_recvrd;
332 u32 ss_scan_length;
333 u32 ss_total_errors;
334 u32 ss_total_recvrd;
335 u32 ss_num_scans;

337 /* grp_name was added in version 2 of this structure. */

339 char grp_name[15];
340 u8 reserved3[4];

342 /* dev_addr_list was added in version 3 of this structure. */

344 u8 num_dev_addr;
345 struct atto_dev_addr2 dev_addr_list[8];
346};
347
348
/* VDA RAID-group info: identity (index/name), geometry (capacity, block
 * size, interleave, span/mirror/stripe widths), RAID type, status/rebuild
 * state, member list (up to 32, with MISSING/NEW sentinels) and feature
 * flags.  Anonymous unions overlay request-side fields (op_ctrl,
 * features_mask) on response-side fields, as in atto_vda_devinfo. */
349struct __packed atto_vda_grp_info {
350 u8 grp_index;
351 #define VDA_MAX_RAID_GROUPS 32

353 char grp_name[15];
354 u64 capacity;
355 u32 block_size;
356 u32 interleave;
357 u8 type;
358 #define VDA_GRP_TYPE_RAID0 0
359 #define VDA_GRP_TYPE_RAID1 1
360 #define VDA_GRP_TYPE_RAID4 4
361 #define VDA_GRP_TYPE_RAID5 5
362 #define VDA_GRP_TYPE_RAID6 6
363 #define VDA_GRP_TYPE_RAID10 10
364 #define VDA_GRP_TYPE_RAID40 40
365 #define VDA_GRP_TYPE_RAID50 50
366 #define VDA_GRP_TYPE_RAID60 60
367 #define VDA_GRP_TYPE_DVRAID_HS 252
368 #define VDA_GRP_TYPE_DVRAID_NOHS 253
369 #define VDA_GRP_TYPE_JBOD 254
370 #define VDA_GRP_TYPE_SPARE 255

372 union {
373 u8 status;
374 #define VDA_GRP_STAT_INVALID 0x00
375 #define VDA_GRP_STAT_NEW 0x01
376 #define VDA_GRP_STAT_WAITING 0x02
377 #define VDA_GRP_STAT_ONLINE 0x03
378 #define VDA_GRP_STAT_DEGRADED 0x04
379 #define VDA_GRP_STAT_OFFLINE 0x05
380 #define VDA_GRP_STAT_DELETED 0x06
381 #define VDA_GRP_STAT_RECOV_BASIC 0x07
382 #define VDA_GRP_STAT_RECOV_EXTREME 0x08

384 u8 op_ctrl;
385 #define VDA_GRP_OP_CTRL_START 0x01
386 #define VDA_GRP_OP_CTRL_HALT 0x02
387 #define VDA_GRP_OP_CTRL_RESUME 0x03
388 #define VDA_GRP_OP_CTRL_CANCEL 0x04
389 };

391 u8 rebuild_state;
392 #define VDA_RBLD_NONE 0x00
393 #define VDA_RBLD_REBUILD 0x01
394 #define VDA_RBLD_ERASE 0x02
395 #define VDA_RBLD_PATTERN 0x03
396 #define VDA_RBLD_CONV 0x04
397 #define VDA_RBLD_FULL_INIT 0x05
398 #define VDA_RBLD_QUICK_INIT 0x06
399 #define VDA_RBLD_SECT_SCAN 0x07
400 #define VDA_RBLD_SECT_SCAN_PARITY 0x08
401 #define VDA_RBLD_SECT_SCAN_PARITY_FIX 0x09
402 #define VDA_RBLD_RECOV_REBUILD 0x0A
403 #define VDA_RBLD_RECOV_BASIC 0x0B
404 #define VDA_RBLD_RECOV_EXTREME 0x0C

406 u8 span_depth;
407 u8 progress;
408 u8 mirror_width;
409 u8 stripe_width;
410 u8 member_cnt;

412 union {
413 u16 members[32];
414 #define VDA_MEMBER_MISSING 0xFFFF
415 #define VDA_MEMBER_NEW 0xFFFE
416 u16 features_mask;
417 };

419 u16 features;
420 #define VDA_GRP_FEAT_HOTSWAP 0x0001
421 #define VDA_GRP_FEAT_SPDRD_MASK 0x0006
422 #define VDA_GRP_FEAT_SPDRD_DIS 0x0000
423 #define VDA_GRP_FEAT_SPDRD_ENB 0x0002
424 #define VDA_GRP_FEAT_SPDRD_AUTO 0x0004
425 #define VDA_GRP_FEAT_IDENT 0x0008
426 #define VDA_GRP_FEAT_RBLDPRI_MASK 0x0030
427 #define VDA_GRP_FEAT_RBLDPRI_LOW 0x0010
428 #define VDA_GRP_FEAT_RBLDPRI_SAME 0x0020
429 #define VDA_GRP_FEAT_RBLDPRI_HIGH 0x0030
430 #define VDA_GRP_FEAT_WRITE_CACHE 0x0040
431 #define VDA_GRP_FEAT_RBLD_RESUME 0x0080
432 #define VDA_GRP_FEAT_SECT_RESUME 0x0100
433 #define VDA_GRP_FEAT_INIT_RESUME 0x0200
434 #define VDA_GRP_FEAT_SSD 0x0400
435 #define VDA_GRP_FEAT_BOOT_DEV 0x0800

437 /*
438 * for backward compatibility, a prefetch value of zero means the
439 * setting is ignored/unsupported. therefore, the firmware supported
440 * 0-6 values are incremented to 1-7.
441 */

443 u8 prefetch;
444 u8 op_status;
445 #define VDAGRPOPSTAT_MASK 0x0F
446 #define VDAGRPOPSTAT_INVALID 0x00
447 #define VDAGRPOPSTAT_OK 0x01
448 #define VDAGRPOPSTAT_FAULTED 0x02
449 #define VDAGRPOPSTAT_HALTED 0x03
450 #define VDAGRPOPSTAT_INT 0x04
451 #define VDAGRPOPPROC_MASK 0xF0
452 #define VDAGRPOPPROC_STARTABLE 0x10
453 #define VDAGRPOPPROC_CANCELABLE 0x20
454 #define VDAGRPOPPROC_RESUMABLE 0x40
455 #define VDAGRPOPPROC_HALTABLE 0x80
456 u8 over_provision;
457 u8 reserved[3];

459};
460
461
/*
 * Partition (mapped LUN) information for the VDAMGT_PART_* requests.
 * Packed firmware wire format.
 */
struct __packed atto_vdapart_info {
	u8 part_no;
	#define VDA_MAX_PARTITIONS  128

	char grp_name[15];           /* owning RAID group */
	u64 part_size;
	u64 start_lba;               /* offset of partition within the group */
	u32 block_size;
	u16 target_id;               /* SCSI target/LUN this partition maps to */
	u8 LUN;
	char serial_no[41];
	u8 features;
	#define VDAPI_FEAT_WRITE_CACHE  0x01

	u8 reserved[7];
};
478
479
/*
 * Device health request/response (VDAMGT_DEV_HEALTH_REQ): SMART data,
 * media defect and informational exception counters.
 */
struct __packed atto_vda_dh_info {
	u8 req_type;
	#define VDADH_RQTYPE_CACHE      0x01
	#define VDADH_RQTYPE_FETCH      0x02
	#define VDADH_RQTYPE_SET_STAT   0x03
	#define VDADH_RQTYPE_GET_STAT   0x04

	u8 req_qual;                 /* bitmask selecting which data to act on */
	#define VDADH_RQQUAL_SMART      0x01
	#define VDADH_RQQUAL_MEDDEF     0x02
	#define VDADH_RQQUAL_INFOEXC    0x04

	u8 num_smart_attribs;        /* count of atto_vda_dh_smart entries returned */
	u8 status;
	#define VDADH_STAT_DISABLE      0x00
	#define VDADH_STAT_ENABLE       0x01

	u32 med_defect_cnt;
	u32 info_exc_cnt;
	u8 smart_status;
	#define VDADH_SMARTSTAT_OK      0x00
	#define VDADH_SMARTSTAT_ERR     0x01

	u8 reserved[35];
	struct atto_vda_sge sge[1];  /* legacy variable-length trailer idiom */
};
506
507
/* One SMART attribute as reported through the device health interface. */
struct __packed atto_vda_dh_smart {
	u8 attrib_id;
	u8 current_val;
	u8 worst;
	u8 threshold;
	u8 raw_data[6];
	u8 raw_attrib_status;        /* raw SMART attribute flag bits */
	#define VDADHSM_RAWSTAT_PREFAIL_WARRANTY        0x01
	#define VDADHSM_RAWSTAT_ONLINE_COLLECTION       0x02
	#define VDADHSM_RAWSTAT_PERFORMANCE_ATTR        0x04
	#define VDADHSM_RAWSTAT_ERROR_RATE_ATTR         0x08
	#define VDADHSM_RAWSTAT_EVENT_COUNT_ATTR        0x10
	#define VDADHSM_RAWSTAT_SELF_PRESERVING_ATTR    0x20

	u8 calc_attrib_status;       /* firmware's interpretation of the attribute */
	#define VDADHSM_CALCSTAT_UNKNOWN        0x00
	#define VDADHSM_CALCSTAT_GOOD           0x01
	#define VDADHSM_CALCSTAT_PREFAIL        0x02
	#define VDADHSM_CALCSTAT_OLDAGE         0x03

	u8 reserved[4];
};
530
531
/*
 * Request block for device metrics collection and drive tests
 * (VDAMGT_DEV_METRICS).
 */
struct __packed atto_vda_metrics_info {
	u8 data_version;
	#define VDAMET_VERSION0         0x00
	#define VDAMET_VERSION          VDAMET_VERSION0

	u8 metrics_action;           /* control of metrics gathering */
	#define VDAMET_METACT_NONE      0x00
	#define VDAMET_METACT_START     0x01
	#define VDAMET_METACT_STOP      0x02
	#define VDAMET_METACT_RETRIEVE  0x03
	#define VDAMET_METACT_CLEAR     0x04

	u8 test_action;              /* control of drive test runs */
	#define VDAMET_TSTACT_NONE              0x00
	#define VDAMET_TSTACT_STRT_INIT         0x01
	#define VDAMET_TSTACT_STRT_READ         0x02
	#define VDAMET_TSTACT_STRT_VERIFY       0x03
	#define VDAMET_TSTACT_STRT_INIT_VERIFY  0x04
	#define VDAMET_TSTACT_STOP              0x05

	u8 num_dev_indexes;          /* entries used in dev_indexes[] */
	#define VDAMET_ALL_DEVICES      0xFF

	u16 dev_indexes[32];
	u8 reserved[12];
	struct atto_vda_sge sge[1];  /* legacy variable-length trailer idiom */
};
559
560
/* Per-device metrics counters returned by a metrics retrieve request. */
struct __packed atto_vda_metrics_data {
	u16 dev_index;
	u16 length;
	#define VDAMD_LEN_LAST          0x8000  /* last record in the buffer */
	#define VDAMD_LEN_MASK          0x0FFF

	u32 flags;
	#define VDAMDF_RUN          0x00000007
	#define VDAMDF_RUN_READ     0x00000001
	#define VDAMDF_RUN_WRITE    0x00000002
	#define VDAMDF_RUN_ALL      0x00000004
	#define VDAMDF_READ         0x00000010
	#define VDAMDF_WRITE        0x00000020
	#define VDAMDF_ALL          0x00000040
	#define VDAMDF_DRIVETEST    0x40000000
	#define VDAMDF_NEW          0x80000000

	u64 total_read_data;
	u64 total_write_data;
	u64 total_read_io;
	u64 total_write_io;
	u64 read_start_time;
	u64 read_stop_time;
	u64 write_start_time;
	u64 write_stop_time;
	u64 read_maxio_time;
	/*
	 * NOTE(review): this identifier looks garbled; by position it is
	 * presumably "write_maxio_time".  It is part of the driver's API,
	 * so renaming must be coordinated with all users — confirm first.
	 */
	u64 wpvdadmetricsdatarite_maxio_time;
	u64 read_totalio_time;
	u64 write_totalio_time;
	u64 read_total_errs;
	u64 write_total_errs;
	u64 read_recvrd_errs;
	u64 write_recvrd_errs;
	u64 miscompares;
};
596
597
/*
 * Scheduled background event (e.g. periodic sector scans) for
 * VDAMGT_SCHEDULE_EVENT / VDAMGT_SCHEDULE_INFO.
 */
struct __packed atto_vda_schedule_info {
	u8 schedule_type;
	#define VDASI_SCHTYPE_ONETIME   0x01
	#define VDASI_SCHTYPE_DAILY     0x02
	#define VDASI_SCHTYPE_WEEKLY    0x03

	u8 operation;
	#define VDASI_OP_NONE           0x00
	#define VDASI_OP_CREATE         0x01
	#define VDASI_OP_CANCEL         0x02

	u8 hour;
	u8 minute;
	u8 day;
	#define VDASI_DAY_NONE          0x00

	u8 progress;
	#define VDASI_PROG_NONE         0xFF

	u8 event_type;
	#define VDASI_EVTTYPE_SECT_SCAN             0x01
	#define VDASI_EVTTYPE_SECT_SCAN_PARITY      0x02
	#define VDASI_EVTTYPE_SECT_SCAN_PARITY_FIX  0x03

	u8 recurrences;              /* 0 = repeat forever */
	#define VDASI_RECUR_FOREVER     0x00

	u32 id;
	#define VDASI_ID_NONE           0x00

	char grp_name[15];           /* group the scheduled event applies to */
	u8 reserved[85];
};
631
632
/* NV-cache module status (VDAMGT_NVCACHE_INFO / VDAMGT_NVCACHE_SET). */
struct __packed atto_vda_n_vcache_info {
	u8 super_cap_status;
	#define VDANVCI_SUPERCAP_NOT_PRESENT    0x00
	#define VDANVCI_SUPERCAP_FULLY_CHARGED  0x01
	#define VDANVCI_SUPERCAP_NOT_CHARGED    0x02

	u8 nvcache_module_status;
	#define VDANVCI_NVCACHEMODULE_NOT_PRESENT  0x00
	#define VDANVCI_NVCACHEMODULE_PRESENT      0x01

	u8 protection_mode;
	#define VDANVCI_PROTMODE_HI_PROTECT     0x00
	#define VDANVCI_PROTMODE_HI_PERFORM     0x01

	u8 reserved[109];
};
649
650
/* Adapter buzzer control/state (VDAMGT_BUZZER_INFO / VDAMGT_BUZZER_SET). */
struct __packed atto_vda_buzzer_info {
	u8 status;
	#define VDABUZZI_BUZZER_OFF     0x00
	#define VDABUZZI_BUZZER_ON      0x01
	#define VDABUZZI_BUZZER_LAST    0x02

	u8 reserved[3];
	u32 duration;                /* milliseconds? units not defined here — confirm with firmware spec */
	#define VDABUZZI_DURATION_INDEFINITE 0xffffffff

	u8 reserved2[104];
};
663
664
/* Adapter-wide information (VDAMGT_ADAP_INFO / VDAMGT_ADAP_FEATURES). */
struct __packed atto_vda_adapter_info {
	u8 version;
	#define VDAADAPINFO_VERSION0    0x00
	#define VDAADAPINFO_VERSION     VDAADAPINFO_VERSION0

	u8 reserved;
	signed short utc_offset;     /* offset from UTC; sign carries direction */
	u32 utc_time;
	u32 features;
	#define VDA_ADAP_FEAT_IDENT     0x0001
	#define VDA_ADAP_FEAT_BUZZ_ERR  0x0002
	#define VDA_ADAP_FEAT_UTC_TIME  0x0004

	u32 valid_features;          /* which bits of 'features' are meaningful */
	char active_config[33];
	u8 temp_count;               /* number of temperature sensors */
	u8 fan_count;                /* number of fans */
	u8 reserved3[61];
};
684
685
/* One temperature sensor reading (VDAMGT_TEMP_INFO). */
struct __packed atto_vda_temp_info {
	u8 temp_index;
	u8 max_op_temp;
	u8 min_op_temp;
	u8 op_temp_warn;
	u8 temperature;
	u8 type;
	#define VDA_TEMP_TYPE_CPU  1

	u8 reserved[106];
};
697
698
/* One fan's status and thresholds (VDAMGT_FAN_INFO). */
struct __packed atto_vda_fan_info {
	u8 fan_index;
	u8 status;
	#define VDA_FAN_STAT_UNKNOWN 0
	#define VDA_FAN_STAT_NORMAL  1
	#define VDA_FAN_STAT_FAIL    2

	/*
	 * NOTE(review): identifier looks garbled; by the adjacent
	 * warn_threshold it is presumably "crit_threshold".  Part of the
	 * public layout, so renaming must be coordinated with all users.
	 */
	u16 crit_pvdafaninfothreshold;
	u16 warn_threshold;
	u16 speed;
	u8 reserved[104];
};
711
712
/* VDA management commands (values for atto_vda_mgmt_req.mgt_func) */

/* device commands */
#define VDAMGT_DEV_SCAN         0x00
#define VDAMGT_DEV_INFO         0x01
#define VDAMGT_DEV_CLEAN        0x02
#define VDAMGT_DEV_IDENTIFY     0x03
#define VDAMGT_DEV_IDENTSTOP    0x04
#define VDAMGT_DEV_PT_INFO      0x05
#define VDAMGT_DEV_FEATURES     0x06
#define VDAMGT_DEV_PT_FEATURES  0x07
#define VDAMGT_DEV_HEALTH_REQ   0x08
#define VDAMGT_DEV_METRICS      0x09
#define VDAMGT_DEV_INFO2        0x0A
#define VDAMGT_DEV_OPERATION    0x0B
#define VDAMGT_DEV_INFO2_BYADDR 0x0C
/* RAID group commands */
#define VDAMGT_GRP_INFO         0x10
#define VDAMGT_GRP_CREATE       0x11
#define VDAMGT_GRP_DELETE       0x12
#define VDAMGT_ADD_STORAGE      0x13
#define VDAMGT_MEMBER_ADD       0x14
#define VDAMGT_GRP_COMMIT       0x15
#define VDAMGT_GRP_REBUILD      0x16
#define VDAMGT_GRP_COMMIT_INIT  0x17
#define VDAMGT_QUICK_RAID       0x18
#define VDAMGT_GRP_FEATURES     0x19
#define VDAMGT_GRP_COMMIT_INIT_AUTOMAP  0x1A
#define VDAMGT_QUICK_RAID_INIT_AUTOMAP  0x1B
#define VDAMGT_GRP_OPERATION    0x1C
/* adapter-level commands */
#define VDAMGT_CFG_SAVE         0x20
#define VDAMGT_LAST_ERROR       0x21
#define VDAMGT_ADAP_INFO        0x22
#define VDAMGT_ADAP_FEATURES    0x23
#define VDAMGT_TEMP_INFO        0x24
#define VDAMGT_FAN_INFO         0x25
/* partition commands */
#define VDAMGT_PART_INFO        0x30
#define VDAMGT_PART_MAP         0x31
#define VDAMGT_PART_UNMAP       0x32
#define VDAMGT_PART_AUTOMAP     0x33
#define VDAMGT_PART_SPLIT       0x34
#define VDAMGT_PART_MERGE       0x35
/* spare commands */
#define VDAMGT_SPARE_LIST       0x40
#define VDAMGT_SPARE_ADD        0x41
#define VDAMGT_SPARE_REMOVE     0x42
#define VDAMGT_LOCAL_SPARE_ADD  0x43
/* schedule commands */
#define VDAMGT_SCHEDULE_EVENT   0x50
#define VDAMGT_SCHEDULE_INFO    0x51
/* NV-cache commands */
#define VDAMGT_NVCACHE_INFO     0x60
#define VDAMGT_NVCACHE_SET      0x61
/* buzzer commands */
#define VDAMGT_BUZZER_INFO      0x70
#define VDAMGT_BUZZER_SET       0x71
763
764
/*
 * Common header for all asynchronous events (AE) delivered by the
 * firmware; bytype selects which atto_vda_ae_* body follows.
 */
struct __packed atto_vda_ae_hdr {
	u8 bylength;                 /* total length of the event */
	u8 byflags;
	#define VDAAE_HDRF_EVENT_ACK    0x01

	u8 byversion;
	#define VDAAE_HDR_VER_0         0

	u8 bytype;
	#define VDAAE_HDR_TYPE_RAID     1
	#define VDAAE_HDR_TYPE_LU       2
	#define VDAAE_HDR_TYPE_DISK     3
	#define VDAAE_HDR_TYPE_RESET    4
	#define VDAAE_HDR_TYPE_LOG_INFO 5
	#define VDAAE_HDR_TYPE_LOG_WARN 6
	#define VDAAE_HDR_TYPE_LOG_CRIT 7
	#define VDAAE_HDR_TYPE_LOG_FAIL 8
	#define VDAAE_HDR_TYPE_NVC      9
	#define VDAAE_HDR_TYPE_TLG_INFO 10
	#define VDAAE_HDR_TYPE_TLG_WARN 11
	#define VDAAE_HDR_TYPE_TLG_CRIT 12
	#define VDAAE_HDR_TYPE_PWRMGT   13
	#define VDAAE_HDR_TYPE_MUTE     14
	#define VDAAE_HDR_TYPE_DEV      15
};
790
791
/* RAID group state-change asynchronous event (VDAAE_HDR_TYPE_RAID). */
struct __packed atto_vda_ae_raid {
	struct atto_vda_ae_hdr hdr;
	u32 dwflags;                 /* which fields below changed */
	#define VDAAE_GROUP_STATE   0x00000001
	#define VDAAE_RBLD_STATE    0x00000002
	#define VDAAE_RBLD_PROG     0x00000004
	#define VDAAE_MEMBER_CHG    0x00000008
	#define VDAAE_PART_CHG      0x00000010
	#define VDAAE_MEM_STATE_CHG 0x00000020

	u8 bygroup_state;
	#define VDAAE_RAID_INVALID  0
	#define VDAAE_RAID_NEW      1
	#define VDAAE_RAID_WAITING  2
	#define VDAAE_RAID_ONLINE   3
	#define VDAAE_RAID_DEGRADED 4
	#define VDAAE_RAID_OFFLINE  5
	#define VDAAE_RAID_DELETED  6
	#define VDAAE_RAID_BASIC    7
	#define VDAAE_RAID_EXTREME  8
	#define VDAAE_RAID_UNKNOWN  9

	u8 byrebuild_state;
	#define VDAAE_RBLD_NONE                 0
	#define VDAAE_RBLD_REBUILD              1
	#define VDAAE_RBLD_ERASE                2
	#define VDAAE_RBLD_PATTERN              3
	#define VDAAE_RBLD_CONV                 4
	#define VDAAE_RBLD_FULL_INIT            5
	#define VDAAE_RBLD_QUICK_INIT           6
	#define VDAAE_RBLD_SECT_SCAN            7
	#define VDAAE_RBLD_SECT_SCAN_PARITY     8
	#define VDAAE_RBLD_SECT_SCAN_PARITY_FIX 9
	#define VDAAE_RBLD_RECOV_REBUILD        10
	#define VDAAE_RBLD_UNKNOWN              11

	u8 byrebuild_progress;       /* percent? units not defined here — confirm */
	u8 op_status;                /* same encoding as atto_vda_grp_info.op_status */
	#define VDAAE_GRPOPSTAT_MASK        0x0F
	#define VDAAE_GRPOPSTAT_INVALID     0x00
	#define VDAAE_GRPOPSTAT_OK          0x01
	#define VDAAE_GRPOPSTAT_FAULTED     0x02
	#define VDAAE_GRPOPSTAT_HALTED      0x03
	#define VDAAE_GRPOPSTAT_INT         0x04
	#define VDAAE_GRPOPPROC_MASK        0xF0
	#define VDAAE_GRPOPPROC_STARTABLE   0x10
	#define VDAAE_GRPOPPROC_CANCELABLE  0x20
	#define VDAAE_GRPOPPROC_RESUMABLE   0x40
	#define VDAAE_GRPOPPROC_HALTABLE    0x80
	char acname[15];             /* group name */
	u8 byreserved;
	u8 byreserved2[0x80 - 0x1C]; /* pad event to a fixed 0x80-byte size */
};
845
846
/* Target/LUN identity carried in a logical-unit event. */
struct __packed atto_vda_ae_lu_tgt_lun {
	u16 wtarget_id;
	u8 bylun;
	u8 byreserved;
};
852
853
/* Target/LUN identity plus RAID geometry for a RAID-backed LU event. */
struct __packed atto_vda_ae_lu_tgt_lun_raid {
	u16 wtarget_id;
	u8 bylun;
	u8 byreserved;
	u32 dwinterleave;
	u32 dwblock_size;
};
861
862
/* Logical-unit asynchronous event (VDAAE_HDR_TYPE_LU). */
struct __packed atto_vda_ae_lu {
	struct atto_vda_ae_hdr hdr;
	u32 dwevent;                 /* bitmask of what happened / what id is valid */
	#define VDAAE_LU_DISC        0x00000001
	#define VDAAE_LU_LOST        0x00000002
	#define VDAAE_LU_STATE       0x00000004
	#define VDAAE_LU_PASSTHROUGH 0x10000000
	#define VDAAE_LU_PHYS_ID     0x20000000

	u8 bystate;
	#define VDAAE_LU_UNDEFINED        0
	#define VDAAE_LU_NOT_PRESENT      1
	#define VDAAE_LU_OFFLINE          2
	#define VDAAE_LU_ONLINE           3
	#define VDAAE_LU_DEGRADED         4
	#define VDAAE_LU_FACTORY_DISABLED 5
	#define VDAAE_LU_DELETED          6
	#define VDAAE_LU_BUSSCAN          7
	#define VDAAE_LU_UNKNOWN          8

	u8 byreserved;
	u16 wphys_target_id;

	/* plain vs RAID identity; which applies depends on dwevent bits */
	union {
		struct atto_vda_ae_lu_tgt_lun tgtlun;
		struct atto_vda_ae_lu_tgt_lun_raid tgtlun_raid;
	} id;
};
891
892
/* Disk asynchronous event (VDAAE_HDR_TYPE_DISK); header only. */
struct __packed atto_vda_ae_disk {
	struct atto_vda_ae_hdr hdr;
};
896
897
#define VDAAE_LOG_STRSZ 64

/* Firmware log-message event (VDAAE_HDR_TYPE_LOG_*). */
struct __packed atto_vda_ae_log {
	struct atto_vda_ae_hdr hdr;
	char aclog_ascii[VDAAE_LOG_STRSZ];
};
904
905
#define VDAAE_TLG_STRSZ 56

/* Timestamped firmware log-message event (VDAAE_HDR_TYPE_TLG_*). */
struct __packed atto_vda_ae_timestamp_log {
	struct atto_vda_ae_hdr hdr;
	u32 dwtimestamp;
	char aclog_ascii[VDAAE_TLG_STRSZ];
};
913
914
/* NV-cache asynchronous event (VDAAE_HDR_TYPE_NVC); header only. */
struct __packed atto_vda_ae_nvc {
	struct atto_vda_ae_hdr hdr;
};
918
919
/* Device asynchronous event (VDAAE_HDR_TYPE_DEV) with device address. */
struct __packed atto_vda_ae_dev {
	struct atto_vda_ae_hdr hdr;
	struct atto_dev_addr devaddr;
};
924
925
/* Any asynchronous event; discriminate via hdr.bytype. */
union atto_vda_ae {
	struct atto_vda_ae_hdr hdr;
	struct atto_vda_ae_disk disk;
	struct atto_vda_ae_lu lu;
	struct atto_vda_ae_raid raid;
	struct atto_vda_ae_log log;
	struct atto_vda_ae_timestamp_log tslog;
	struct atto_vda_ae_nvc nvcache;
	struct atto_vda_ae_dev dev;
};
936
937
/* Wall-clock date/time passed to the firmware in VDA_CFG_INIT. */
struct __packed atto_vda_date_and_time {
	u8 flags;
	#define VDA_DT_DAY_MASK 0x07    /* day-of-week in the low 3 bits */
	#define VDA_DT_DAY_NONE 0x00
	#define VDA_DT_DAY_SUN  0x01
	#define VDA_DT_DAY_MON  0x02
	#define VDA_DT_DAY_TUE  0x03
	#define VDA_DT_DAY_WED  0x04
	#define VDA_DT_DAY_THU  0x05
	#define VDA_DT_DAY_FRI  0x06
	#define VDA_DT_DAY_SAT  0x07
	#define VDA_DT_PM       0x40    /* 12-hour clock PM flag */
	#define VDA_DT_MILITARY 0x80    /* 24-hour clock */

	u8 seconds;
	u8 minutes;
	u8 hours;
	u8 day;
	u8 month;
	u16 year;
};
959
/* flag/field layout of an SGE control dword */
#define SGE_LEN_LIMIT   0x003FFFFF /*! mask of segment length */
#define SGE_LEN_MAX     0x003FF000 /*! maximum segment length */
#define SGE_LAST        0x01000000 /*! last entry */
#define SGE_ADDR_64     0x04000000 /*! 64-bit addressing flag */
#define SGE_CHAIN       0x80000000 /*! chain descriptor flag */
#define SGE_CHAIN_LEN   0x0000FFFF /*! mask of length in chain entries */
#define SGE_CHAIN_SZ    0x00FF0000 /*! mask of size of chained buffer */
967
968
/*
 * Payload of the VDA_CFG_INIT / VDA_CFG_GET_INIT configuration exchange:
 * driver supplies the clock and SGL page size, firmware reports versions
 * and capabilities.
 */
struct __packed atto_vda_cfg_init {
	struct atto_vda_date_and_time date_time;
	u32 sgl_page_size;
	u32 vda_version;
	u32 fw_version;
	u32 fw_build;
	u32 fw_release;
	u32 epoch_time;
	u32 ioctl_tunnel;            /* supported ioctl pass-through services */
	#define VDA_ITF_MEM_RW           0x00000001
	#define VDA_ITF_TRACE            0x00000002
	#define VDA_ITF_SCSI_PASS_THRU   0x00000004
	#define VDA_ITF_GET_DEV_ADDR     0x00000008
	#define VDA_ITF_PHY_CTRL         0x00000010
	#define VDA_ITF_CONN_CTRL        0x00000020
	#define VDA_ITF_GET_DEV_INFO     0x00000040

	u32 num_targets_backend;
	u8 reserved[0x48];
};
989
990
/* configuration commands (values for atto_vda_cfg_req.sub_func) */

#define VDA_CFG_INIT          0x00
#define VDA_CFG_GET_INIT      0x01
#define VDA_CFG_GET_INIT2     0x02
996
997
/*! physical region descriptor (PRD) aka scatter/gather entry */

struct __packed atto_physical_region_description {
	u64 address;
	u32 ctl_len;                 /* length in low bits, control flags above */
	#define PRD_LEN_LIMIT       0x003FFFFF
	#define PRD_LEN_MAX         0x003FF000
	#define PRD_NXT_PRD_CNT     0x0000007F
	#define PRD_CHAIN           0x01000000
	#define PRD_DATA            0x00000000
	#define PRD_INT_SEL         0xF0000000
	#define PRD_INT_SEL_F0      0x00000000
	#define PRD_INT_SEL_F1      0x40000000
	#define PRD_INT_SEL_F2      0x80000000
	#define PRD_INT_SEL_F3      0xc0000000
	#define PRD_INT_SEL_SRAM    0x10000000
	#define PRD_INT_SEL_PBSR    0x20000000

};
1017
/* Request types.  NOTE that ALL requests have the same layout for the first
 * few bytes.
 */
struct __packed atto_vda_req_header {
	u32 length;
	u8 function;                 /* VDA_FUNC_* selector */
	u8 variable1;                /* meaning depends on function */
	u8 chain_offset;
	u8 sg_list_offset;
	u32 handle;                  /* echoed back in atto_vda_ob_rsp.handle */
};
1029
1030
#define FCP_CDB_SIZE 16

/* SCSI command request (function == VDA_FUNC_SCSI). */
struct __packed atto_vda_scsi_req {
	u32 length;
	u8 function;                 /* VDA_FUNC_SCSI */
	u8 sense_len;
	u8 chain_offset;
	u8 sg_list_offset;
	u32 handle;
	u32 flags;                   /* FCP-style command flags */
	#define FCP_CMND_LUN_MASK    0x000000FF
	#define FCP_CMND_TA_MASK     0x00000700  /* task attribute */
	#define FCP_CMND_TA_SIMPL_Q  0x00000000
	#define FCP_CMND_TA_HEAD_Q   0x00000100
	#define FCP_CMND_TA_ORDRD_Q  0x00000200
	#define FCP_CMND_TA_ACA      0x00000400
	#define FCP_CMND_PRI_MASK    0x00007800
	#define FCP_CMND_TM_MASK     0x00FF0000  /* task management */
	#define FCP_CMND_ATS         0x00020000
	#define FCP_CMND_CTS         0x00040000
	#define FCP_CMND_LRS         0x00100000
	#define FCP_CMND_TRS         0x00200000
	#define FCP_CMND_CLA         0x00400000
	#define FCP_CMND_TRM         0x00800000
	#define FCP_CMND_DATA_DIR    0x03000000
	#define FCP_CMND_WRD         0x01000000
	#define FCP_CMND_RDD         0x02000000

	u8 cdb[FCP_CDB_SIZE];
	/* inline sense description vs full PRD form of the same bytes */
	union {
		struct __packed {
			u64 ppsense_buf;
			u16 target_id;
			u8 iblk_cnt_prd;
			u8 reserved;
		};

		struct atto_physical_region_description sense_buff_prd;
	};

	/* trailing space is interpreted per request usage */
	union {
		struct atto_vda_sge sge[1];

		u32 abort_handle;
		u32 dwords[245];
		struct atto_physical_region_description prd[1];
	} u;
};
1079
1080
/* Flash access request (function == VDA_FUNC_FLASH). */
struct __packed atto_vda_flash_req {
	u32 length;
	u8 function;                 /* VDA_FUNC_FLASH */
	u8 sub_func;
	u8 chain_offset;
	u8 sg_list_offset;
	u32 handle;
	u32 flash_addr;
	u8 checksum;
	u8 rsvd[3];

	union {
		struct {
			char file_name[16]; /* 8.3 fname, NULL term, wc=* */
			struct atto_vda_sge sge[1];
		} file;

		struct atto_vda_sge sge[1];
		struct atto_physical_region_description prde[2];
	} data;
};
1102
1103
/* Diagnostic request (function == VDA_FUNC_DIAG). */
struct __packed atto_vda_diag_req {
	u32 length;
	u8 function;                 /* VDA_FUNC_DIAG */
	u8 sub_func;
	#define VDA_DIAG_STATUS 0x00
	#define VDA_DIAG_RESET  0x01
	#define VDA_DIAG_PAUSE  0x02
	#define VDA_DIAG_RESUME 0x03
	#define VDA_DIAG_READ   0x04
	#define VDA_DIAG_WRITE  0x05

	u8 chain_offset;
	u8 sg_list_offset;
	u32 handle;
	u32 rsvd;
	u64 local_addr;              /* adapter-local address for READ/WRITE */
	struct atto_vda_sge sge[1];
};
1122
1123
/*
 * Asynchronous event request (function == VDA_FUNC_AE): posts a buffer
 * the firmware fills with an event when one occurs.
 */
struct __packed atto_vda_ae_req {
	u32 length;
	u8 function;                 /* VDA_FUNC_AE */
	u8 reserved1;
	u8 chain_offset;
	u8 sg_list_offset;
	u32 handle;

	union {
		struct atto_vda_sge sge[1];
		struct atto_physical_region_description prde[1];
	};
};
1137
1138
/* CLI tunnel request (function == VDA_FUNC_CLI). */
struct __packed atto_vda_cli_req {
	u32 length;
	u8 function;                 /* VDA_FUNC_CLI */
	u8 reserved1;
	u8 chain_offset;
	u8 sg_list_offset;
	u32 handle;
	u32 cmd_rsp_len;             /* command length; response len returned in rsp */
	struct atto_vda_sge sge[1];
};
1149
1150
/* Ioctl tunnel request (function == VDA_FUNC_IOCTL). */
struct __packed atto_vda_ioctl_req {
	u32 length;
	u8 function;                 /* VDA_FUNC_IOCTL */
	u8 sub_func;
	u8 chain_offset;
	u8 sg_list_offset;
	u32 handle;

	/* placeholder keeping the layout aligned with other requests */
	union {
		struct atto_vda_sge reserved_sge;
		struct atto_physical_region_description reserved_prde;
	};

	/* sub-function specific parameters; only CSMI defined here */
	union {
		struct {
			u32 ctrl_code;
			u16 target_id;
			u8 lun;
			u8 reserved;
		} csmi;
	};

	union {
		struct atto_vda_sge sge[1];
		struct atto_physical_region_description prde[1];
	};
};
1178
1179
/* Configuration request (function == VDA_FUNC_CFG, sub_func == VDA_CFG_*). */
struct __packed atto_vda_cfg_req {
	u32 length;
	u8 function;                 /* VDA_FUNC_CFG */
	u8 sub_func;
	u8 rsvd1;
	u8 sg_list_offset;
	u32 handle;

	/* inline payload or descriptor of an external buffer */
	union {
		u8 bytes[116];
		struct atto_vda_cfg_init init;
		struct atto_vda_sge sge;
		struct atto_physical_region_description prde;
	} data;
};
1195
1196
/* Management request (function == VDA_FUNC_MGT, mgt_func == VDAMGT_*). */
struct __packed atto_vda_mgmt_req {
	u32 length;
	u8 function;                 /* VDA_FUNC_MGT */
	u8 mgt_func;                 /* VDAMGT_* command */
	u8 chain_offset;
	u8 sg_list_offset;
	u32 handle;
	u8 scan_generation;
	u8 payld_sglst_offset;
	u16 dev_index;
	u32 payld_length;
	u32 pad;
	union {
		struct atto_vda_sge sge[2];
		struct atto_physical_region_description prde[2];
	};
	struct atto_vda_sge payld_sge[1]; /* payload scatter/gather list */
};
1215
1216
/* Any inbound VDA request; fixed 1 KiB slot, discriminate via 'function'. */
union atto_vda_req {
	struct atto_vda_scsi_req scsi;
	struct atto_vda_flash_req flash;
	struct atto_vda_diag_req diag;
	struct atto_vda_ae_req ae;
	struct atto_vda_cli_req cli;
	struct atto_vda_ioctl_req ioctl;
	struct atto_vda_cfg_req cfg;
	struct atto_vda_mgmt_req mgt;
	u8 bytes[1024];
};
1228
/* Outbound response structures */

/* Function-specific response for VDA_FUNC_SCSI. */
struct __packed atto_vda_scsi_rsp {
	u8 scsi_stat;                /* SCSI status byte */
	u8 sense_len;                /* valid sense bytes in the sense buffer */
	u8 rsvd[2];
	u32 residual_length;         /* bytes not transferred */
};
1237
/* Function-specific response for VDA_FUNC_FLASH. */
struct __packed atto_vda_flash_rsp {
	u32 file_size;
};
1241
/* Function-specific response for VDA_FUNC_AE. */
struct __packed atto_vda_ae_rsp {
	u32 length;                  /* bytes of event data returned */
};
1245
/* Function-specific response for VDA_FUNC_CLI. */
struct __packed atto_vda_cli_rsp {
	u32 cmd_rsp_len;             /* length of the CLI response text */
};
1249
/* Function-specific response for VDA_FUNC_IOCTL; only CSMI defined here. */
struct __packed atto_vda_ioctl_rsp {
	union {
		struct {
			u32 csmi_status;
			u16 target_id;
			u8 lun;
			u8 reserved;
		} csmi;
	};
};
1260
/* Function-specific response for VDA_FUNC_CFG. */
struct __packed atto_vda_cfg_rsp {
	u16 vda_version;
	u16 fw_release;
	u32 fw_build;
};
1266
/* Function-specific response for VDA_FUNC_MGT. */
struct __packed atto_vda_mgmt_rsp {
	u32 length;                  /* bytes of management data returned */
	u16 dev_index;
	u8 scan_generation;
};
1272
/* Function-specific part of an outbound response; 8 bytes. */
union atto_vda_func_rsp {
	struct atto_vda_scsi_rsp scsi_rsp;
	struct atto_vda_flash_rsp flash_rsp;
	struct atto_vda_ae_rsp ae_rsp;
	struct atto_vda_cli_rsp cli_rsp;
	struct atto_vda_ioctl_rsp ioctl_rsp;
	struct atto_vda_cfg_rsp cfg_rsp;
	struct atto_vda_mgmt_rsp mgt_rsp;
	u32 dwords[2];
};
1283
/* Outbound (firmware -> driver) response list entry. */
struct __packed atto_vda_ob_rsp {
	u32 handle;                  /* matches the originating request's handle */
	u8 req_stat;                 /* overall request status */
	u8 rsvd[3];

	union atto_vda_func_rsp
		func_rsp;
};
1292
/* Raw asynchronous event payload buffer. */
struct __packed atto_vda_ae_data {
	u8 event_data[256];
};
1296
/* Management response payload; interpret per the request's mgt_func. */
struct __packed atto_vda_mgmt_data {
	union {
		u8 bytes[112];
		struct atto_vda_devinfo dev_info;
		struct atto_vda_grp_info grp_info;
		struct atto_vdapart_info part_info;
		struct atto_vda_dh_info dev_health_info;
		struct atto_vda_metrics_info metrics_info;
		struct atto_vda_schedule_info sched_info;
		struct atto_vda_n_vcache_info nvcache_info;
		struct atto_vda_buzzer_info buzzer_info;
	} data;
};
1310
1311union atto_vda_rsp_data {
1312 struct atto_vda_ae_data ae_data;
1313 struct atto_vda_mgmt_data mgt_data;
1314 u8 sense_data[252];
1315 #define SENSE_DATA_SZ 252;
1316 u8 bytes[256];
1317};
1318
1319#endif
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
new file mode 100644
index 000000000000..0838e265e0b9
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -0,0 +1,1441 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r.h
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include <linux/kernel.h>
45#include <linux/delay.h>
46#include <linux/pci.h>
47#include <linux/proc_fs.h>
48#include <linux/workqueue.h>
49#include <linux/interrupt.h>
50#include <linux/module.h>
51#include <linux/vmalloc.h>
52#include <scsi/scsi.h>
53#include <scsi/scsi_host.h>
54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h>
56#include <scsi/scsi_eh.h>
57#include <scsi/scsi_tcq.h>
58
59#include "esas2r_log.h"
60#include "atioctl.h"
61#include "atvda.h"
62
63#ifndef ESAS2R_H
64#define ESAS2R_H
65
/* Global Variables */
extern struct esas2r_adapter *esas2r_adapters[];
/* single shared bounce buffer for buffered ioctls */
extern u8 *esas2r_buffered_ioctl;
extern dma_addr_t esas2r_buffered_ioctl_addr;
extern u32 esas2r_buffered_ioctl_size;
extern struct pci_dev *esas2r_buffered_ioctl_pcid;
/* module parameters with their clamp bounds */
#define SGL_PG_SZ_MIN   64
#define SGL_PG_SZ_MAX   1024
extern int sgl_page_size;
#define NUM_SGL_MIN     8
#define NUM_SGL_MAX     2048
extern int num_sg_lists;
#define NUM_REQ_MIN     4
#define NUM_REQ_MAX     256
extern int num_requests;
#define NUM_AE_MIN      2
#define NUM_AE_MAX      8
extern int num_ae_requests;
extern int cmd_per_lun;
extern int can_queue;
extern int esas2r_max_sectors;
extern int sg_tablesize;
extern int interrupt_mode;
extern int num_io_requests;
90
/* Macro definitions */
#define ESAS2R_MAX_ID        255
#define MAX_ADAPTERS         32
#define ESAS2R_DRVR_NAME     "esas2r"
#define ESAS2R_LONGNAME      "ATTO ExpressSAS 6GB RAID Adapter"
#define ESAS2R_MAX_DEVICES     32
#define ATTONODE_NAME        "ATTONode"
#define ESAS2R_MAJOR_REV     1
/* 00 is only ever stringized below, so the leading zero is safe */
#define ESAS2R_MINOR_REV     00
#define ESAS2R_VERSION_STR   DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." \
	DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV)
#define ESAS2R_COPYRIGHT_YEARS "2001-2013"
#define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384
#define ESAS2R_DEFAULT_CMD_PER_LUN   64
#define ESAS2R_DEFAULT_NUM_SG_LISTS  1024
/* two-level expansion so macro arguments are expanded before # */
#define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num)
#define NUM_TO_STR(num) #num

#define ESAS2R_SGL_ALIGN    16
#define ESAS2R_LIST_ALIGN   16
#define ESAS2R_LIST_EXTRA   ESAS2R_NUM_EXTRA
#define ESAS2R_DATA_BUF_LEN         256
#define ESAS2R_DEFAULT_TMO          5000
#define ESAS2R_DISC_BUF_LEN         512
#define ESAS2R_FWCOREDUMP_SZ        0x80000
#define ESAS2R_NUM_PHYS             8
#define ESAS2R_TARG_ID_INV          0xFFFF
#define ESAS2R_INT_STS_MASK         MU_INTSTAT_MASK
#define ESAS2R_INT_ENB_MASK         MU_INTSTAT_MASK
#define ESAS2R_INT_DIS_MASK         0
#define ESAS2R_MAX_TARGETS          256
#define ESAS2R_KOBJ_NAME_LEN        20

/* u16 (WORD) component macros */
#define LOBYTE(w) ((u8)(u16)(w))
#define HIBYTE(w) ((u8)(((u16)(w)) >> 8))
#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8)))

/* u32 (DWORD) component macros */
#define LOWORD(d) ((u16)(u32)(d))
#define HIWORD(d) ((u16)(((u32)(d)) >> 16))
#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16)))

/* macro to get the lowest nonzero bit of a value */
#define LOBIT(x) ((x) & (0 - (x)))
136
/* These functions are provided to access the chip's control registers.
 * The register is specified by its byte offset from the register base
 * for the adapter.
 */
/*
 * NOTE(review): the read macro casts a->regs to (void __iomem *) before
 * adding the offset, while the write macro adds the offset first and
 * casts the sum.  The two are only equivalent if a->regs has byte-sized
 * pointer arithmetic — confirm the declared type of 'regs' before
 * normalizing.
 */
#define esas2r_read_register_dword(a, reg) \
	readl((void __iomem *)a->regs + (reg) + MW_REG_OFFSET_HWREG)

#define esas2r_write_register_dword(a, reg, data) \
	writel(data, (void __iomem *)(a->regs + (reg) + MW_REG_OFFSET_HWREG))

#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r)

/* This function is provided to access the chip's data window.  The
 * register is specified by its byte offset from the window base
 * for the adapter.
 */
#define esas2r_read_data_byte(a, reg) \
	readb((void __iomem *)a->data_window + (reg))
155
/* ATTO vendor and device Ids */
#define ATTO_VENDOR_ID          0x117C
#define ATTO_DID_INTEL_IOP348   0x002C
#define ATTO_DID_MV_88RC9580    0x0049
#define ATTO_DID_MV_88RC9580TS  0x0066
#define ATTO_DID_MV_88RC9580TSE 0x0067
#define ATTO_DID_MV_88RC9580TL  0x0068

/* ATTO subsystem device Ids */
#define ATTO_SSDID_TBT  0x4000  /* Thunderbolt flag in subsystem id */
#define ATTO_TSSC_3808  0x4066
#define ATTO_TSSC_3808E 0x4067
#define ATTO_TLSH_1068  0x4068
#define ATTO_ESAS_R680  0x0049
#define ATTO_ESAS_R608  0x004A
#define ATTO_ESAS_R60F  0x004B
#define ATTO_ESAS_R6F0  0x004C
#define ATTO_ESAS_R644  0x004D
#define ATTO_ESAS_R648  0x004E
175
/*
 * flash definitions & structures
 * define the code types
 */
#define FBT_CPYR        0xAA00
#define FBT_SETUP       0xAA02
#define FBT_FLASH_VER   0xAA04

/* offsets to various locations in flash */
#define FLS_OFFSET_BOOT (u32)(0x00700000)
#define FLS_OFFSET_NVR  (u32)(0x007C0000)
#define FLS_OFFSET_CPYR FLS_OFFSET_NVR
#define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT)
#define FLS_BLOCK_SIZE  (u32)(0x00020000)
#define FI_NVR_2KB      0x0800
#define FI_NVR_8KB      0x2000
#define FM_BUF_SZ       0x800

/*
 * marvell frey (88R9580) register definitions
 * chip revision identifiers
 */
#define MVR_FREY_B2     0xB2

/*
 * memory window definitions.  window 0 is the data window with definitions
 * of MW_DATA_XXX.  window 1 is the register window with definitions of
 * MW_REG_XXX.
 */
#define MW_REG_WINDOW_SIZE      (u32)(0x00040000)
#define MW_REG_OFFSET_HWREG     (u32)(0x00000000)
#define MW_REG_OFFSET_PCI       (u32)(0x00008000)
#define MW_REG_PCI_HWREG_DELTA  (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG)
#define MW_DATA_WINDOW_SIZE     (u32)(0x00020000)
#define MW_DATA_ADDR_SER_FLASH  (u32)(0xEC000000)
#define MW_DATA_ADDR_SRAM       (u32)(0xF4000000)
#define MW_DATA_ADDR_PAR_FLASH  (u32)(0xFC000000)
213
214/*
215 * the following registers are for the communication
216 * list interface (AKA message unit (MU))
217 */
218#define MU_IN_LIST_ADDR_LO (u32)(0x00004000)
219#define MU_IN_LIST_ADDR_HI (u32)(0x00004004)
220
221#define MU_IN_LIST_WRITE (u32)(0x00004018)
222 #define MU_ILW_TOGGLE (u32)(0x00004000)
223
224#define MU_IN_LIST_READ (u32)(0x0000401C)
225 #define MU_ILR_TOGGLE (u32)(0x00004000)
226 #define MU_ILIC_LIST (u32)(0x0000000F)
227 #define MU_ILIC_LIST_F0 (u32)(0x00000000)
228 #define MU_ILIC_DEST (u32)(0x00000F00)
229 #define MU_ILIC_DEST_DDR (u32)(0x00000200)
230#define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028)
231
232#define MU_IN_LIST_CONFIG (u32)(0x0000402C)
233 #define MU_ILC_ENABLE (u32)(0x00000001)
234 #define MU_ILC_ENTRY_MASK (u32)(0x000000F0)
235 #define MU_ILC_ENTRY_4_DW (u32)(0x00000020)
236 #define MU_ILC_DYNAMIC_SRC (u32)(0x00008000)
237 #define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000)
238 #define MU_ILC_NUMBER_SHIFT 16
239
240#define MU_OUT_LIST_ADDR_LO (u32)(0x00004050)
241#define MU_OUT_LIST_ADDR_HI (u32)(0x00004054)
242
243#define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058)
244#define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C)
245
246#define MU_OUT_LIST_WRITE (u32)(0x00004068)
247 #define MU_OLW_TOGGLE (u32)(0x00004000)
248
249#define MU_OUT_LIST_COPY (u32)(0x0000406C)
250 #define MU_OLC_TOGGLE (u32)(0x00004000)
251 #define MU_OLC_WRT_PTR (u32)(0x00003FFF)
252
253#define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078)
254 #define MU_OLIC_LIST (u32)(0x0000000F)
255 #define MU_OLIC_LIST_F0 (u32)(0x00000000)
256 #define MU_OLIC_SOURCE (u32)(0x00000F00)
257 #define MU_OLIC_SOURCE_DDR (u32)(0x00000200)
258
259#define MU_OUT_LIST_CONFIG (u32)(0x0000407C)
260 #define MU_OLC_ENABLE (u32)(0x00000001)
261 #define MU_OLC_ENTRY_MASK (u32)(0x000000F0)
262 #define MU_OLC_ENTRY_4_DW (u32)(0x00000020)
263 #define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000)
264 #define MU_OLC_NUMBER_SHIFT 16
265
266#define MU_OUT_LIST_INT_STAT (u32)(0x00004088)
267 #define MU_OLIS_INT (u32)(0x00000001)
268
269#define MU_OUT_LIST_INT_MASK (u32)(0x0000408C)
270 #define MU_OLIS_MASK (u32)(0x00000001)
271
/*
 * the maximum size of the communication lists is two greater than the
 * maximum number of VDA requests.  the extra entries prevent queue
 * overflow.
 */
276#define ESAS2R_MAX_NUM_REQS 256
277#define ESAS2R_NUM_EXTRA 2
278#define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA)
279
280/*
281 * the following registers are for the CPU interface
282 */
283#define MU_CTL_STATUS_IN (u32)(0x00010108)
284 #define MU_CTL_IN_FULL_RST (u32)(0x00000020)
285#define MU_CTL_STATUS_IN_B2 (u32)(0x00010130)
286 #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000)
287#define MU_DOORBELL_IN (u32)(0x00010460)
288 #define DRBL_RESET_BUS (u32)(0x00000002)
289 #define DRBL_PAUSE_AE (u32)(0x00000004)
290 #define DRBL_RESUME_AE (u32)(0x00000008)
291 #define DRBL_MSG_IFC_DOWN (u32)(0x00000010)
292 #define DRBL_FLASH_REQ (u32)(0x00000020)
293 #define DRBL_FLASH_DONE (u32)(0x00000040)
294 #define DRBL_FORCE_INT (u32)(0x00000080)
295 #define DRBL_MSG_IFC_INIT (u32)(0x00000100)
296 #define DRBL_POWER_DOWN (u32)(0x00000200)
297 #define DRBL_DRV_VER_1 (u32)(0x00010000)
298 #define DRBL_DRV_VER DRBL_DRV_VER_1
299#define MU_DOORBELL_IN_ENB (u32)(0x00010464)
300#define MU_DOORBELL_OUT (u32)(0x00010480)
301 #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000)
302 #define DRBL_UNUSED_HANDLER (u32)(0x00100000)
303 #define DRBL_UNDEF_INSTR (u32)(0x00200000)
304 #define DRBL_PREFETCH_ABORT (u32)(0x00300000)
305 #define DRBL_DATA_ABORT (u32)(0x00400000)
306 #define DRBL_JUMP_TO_ZERO (u32)(0x00500000)
307 #define DRBL_FW_RESET (u32)(0x00080000)
308 #define DRBL_FW_VER_MSK (u32)(0x00070000)
309 #define DRBL_FW_VER_0 (u32)(0x00000000)
310 #define DRBL_FW_VER_1 (u32)(0x00010000)
311 #define DRBL_FW_VER DRBL_FW_VER_1
312#define MU_DOORBELL_OUT_ENB (u32)(0x00010484)
313 #define DRBL_ENB_MASK (u32)(0x00F803FF)
314#define MU_INT_STATUS_OUT (u32)(0x00010200)
315 #define MU_INTSTAT_POST_OUT (u32)(0x00000010)
316 #define MU_INTSTAT_DRBL_IN (u32)(0x00000100)
317 #define MU_INTSTAT_DRBL (u32)(0x00001000)
318 #define MU_INTSTAT_MASK (u32)(0x00001010)
319#define MU_INT_MASK_OUT (u32)(0x0001020C)
320
321/* PCI express registers accessed via window 1 */
322#define MVR_PCI_WIN1_REMAP (u32)(0x00008438)
323 #define MVRPW1R_ENABLE (u32)(0x00000001)
324
325
326/* structures */
327
328/* inbound list dynamic source entry */
struct esas2r_inbound_list_source_entry {
	u64 address;	/* bus address of the request data */
	u32 length;	/* transfer length in bytes */
	/* interface selector encoded in the entry; F0 is the only one used */
	#define HWILSE_INTERFACE_F0  0x00000000
	u32 reserved;
};
335
336/* PCI data structure in expansion ROM images */
struct __packed esas2r_boot_header {
	char signature[4];	/* presumably the "PCIR" marker from the PCI firmware spec - confirm */
	u16 vendor_id;
	u16 device_id;
	u16 VPD;		/* pointer to vital product data */
	u16 struct_length;
	u8 struct_revision;
	u8 class_code[3];
	u16 image_length;	/* length of this ROM image */
	u16 code_revision;
	u8 code_type;		/* what kind of code the image contains: */
	#define CODE_TYPE_PC    0
	#define CODE_TYPE_OPEN  1
	#define CODE_TYPE_EFI   3
	u8 indicator;
	#define INDICATOR_LAST  0x80	/* set on the final image in the ROM */
	u8 reserved[2];
};

/* generic expansion ROM image header; points at the PCI data structure */
struct __packed esas2r_boot_image {
	u16 signature;
	u8 reserved[22];
	u16 header_offset;	/* offset of struct esas2r_boot_header */
	u16 pnp_offset;
};

/* legacy PC (BIOS) expansion ROM image layout */
struct __packed esas2r_pc_image {
	u16 signature;
	u8 length;		/* image length (units not shown here - see flash code) */
	u8 entry_point[3];
	u8 checksum;
	u16 image_end;
	u16 min_size;
	u8 rom_flags;
	u8 reserved[12];
	u16 header_offset;
	u16 pnp_offset;
	struct esas2r_boot_header boot_image;
};

/* EFI expansion ROM image layout */
struct __packed esas2r_efi_image {
	u16 signature;
	u16 length;
	u32 efi_signature;
	#define EFI_ROM_SIG     0x00000EF1
	u16 image_type;
	#define EFI_IMAGE_APP   10
	#define EFI_IMAGE_BSD   11
	#define EFI_IMAGE_RTD   12
	u16 machine_type;	/* CPU architecture the EFI image targets */
	#define EFI_MACHINE_IA32    0x014c
	#define EFI_MACHINE_IA64    0x0200
	#define EFI_MACHINE_X64     0x8664
	#define EFI_MACHINE_EBC     0x0EBC
	u16 compression;
	#define EFI_UNCOMPRESSED    0x0000
	#define EFI_COMPRESSED      0x0001
	u8 reserved[8];
	u16 efi_offset;		/* offset of the EFI executable payload */
	u16 header_offset;
	u16 reserved2;
	struct esas2r_boot_header boot_image;
};
400
/* forward declarations so the callback typedefs below can reference them */
struct esas2r_adapter;
struct esas2r_sg_context;
struct esas2r_request;

/*
 * RQCALLBK is the shape of the per-request callbacks (completion,
 * interrupt and auxiliary - see esas2r_request).  RQBUILDSGL builds a
 * scatter/gather list from an esas2r_sg_context.
 */
typedef void (*RQCALLBK) (struct esas2r_adapter *a,
			  struct esas2r_request *rq);
typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a,
			    struct esas2r_sg_context *sgc);
409
/* header describing one component (FW, BIOS, NVRAM image, ...) of a flash image */
struct esas2r_component_header {
	u8 img_type;		/* which component this header describes */
	#define CH_IT_FW    0x00
	#define CH_IT_NVR   0x01
	#define CH_IT_BIOS  0x02
	#define CH_IT_MAC   0x03
	#define CH_IT_CFG   0x04
	#define CH_IT_EFI   0x05
	u8 status;		/* per-component result of the flash operation */
	#define CH_STAT_PENDING 0xff
	#define CH_STAT_FAILED  0x00
	#define CH_STAT_SUCCESS 0x01
	#define CH_STAT_RETRY   0x02
	#define CH_STAT_INVALID 0x03
	u8 pad[2];
	u32 version;
	u32 length;		/* component length in bytes */
	u32 image_offset;	/* component's offset within the overall image */
};
429
430#define FI_REL_VER_SZ 16
431
/*
 * version 0 flash image header.  kept only so older images can still be
 * recognized; current code uses struct esas2r_flash_img (version 1) below.
 */
struct esas2r_flash_img_v0 {
	u8 fi_version;
	#define FI_VERSION_0    00
	u8 status;
	u8 adap_typ;
	u8 action;
	u32 length;
	u16 checksum;
	u16 driver_error;
	u16 flags;
	u16 num_comps;
	#define FI_NUM_COMPS_V0 5	/* v0 images carry five components */
	u8 rel_version[FI_REL_VER_SZ];
	struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0];
	u8 scratch_buf[FM_BUF_SZ];
};

/* version 1 flash image header (current format) */
struct esas2r_flash_img {
	u8 fi_version;
	#define FI_VERSION_1    01
	u8 status;		/* overall result of the flash operation: */
	#define FI_STAT_SUCCESS  0x00
	#define FI_STAT_FAILED   0x01
	#define FI_STAT_REBOOT   0x02
	#define FI_STAT_ADAPTYP  0x03
	#define FI_STAT_INVALID  0x04
	#define FI_STAT_CHKSUM   0x05
	#define FI_STAT_LENGTH   0x06
	#define FI_STAT_UNKNOWN  0x07
	#define FI_STAT_IMG_VER  0x08
	#define FI_STAT_BUSY     0x09
	#define FI_STAT_DUAL     0x0A
	#define FI_STAT_MISSING  0x0B
	#define FI_STAT_UNSUPP   0x0C
	#define FI_STAT_ERASE    0x0D
	#define FI_STAT_FLASH    0x0E
	#define FI_STAT_DEGRADED 0x0F
	u8 adap_typ;		/* adapter family the image is meant for */
	#define FI_AT_UNKNWN    0xFF
	#define FI_AT_SUN_LAKE  0x0B
	#define FI_AT_MV_9580   0x0F
	u8 action;		/* requested operation (download/upload/size query) */
	#define FI_ACT_DOWN     0x00
	#define FI_ACT_UP       0x01
	#define FI_ACT_UPSZ     0x02
	#define FI_ACT_MAX      0x02
	#define FI_ACT_DOWN1    0x80
	u32 length;
	u16 checksum;
	u16 driver_error;
	u16 flags;
	#define FI_FLG_NVR_DEF  0x0001
	u16 num_comps;
	#define FI_NUM_COMPS_V1 6	/* v1 adds one component over v0 */
	u8 rel_version[FI_REL_VER_SZ];
	struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1];
	u8 scratch_buf[FM_BUF_SZ];
};
490
491/* definitions for flash script (FS) commands */
/* definitions for flash script (FS) commands */
struct esas2r_ioctlfs_command {
	u8 command;		/* flash script opcode: */
	#define ESAS2R_FS_CMD_ERASE     0
	#define ESAS2R_FS_CMD_READ      1
	#define ESAS2R_FS_CMD_BEGINW    2
	#define ESAS2R_FS_CMD_WRITE     3
	#define ESAS2R_FS_CMD_COMMIT    4
	#define ESAS2R_FS_CMD_CANCEL    5
	u8 checksum;
	u8 reserved[2];
	u32 flash_addr;		/* target address in flash */
	u32 length;
	u32 image_offset;
};

/* ioctl envelope for flash script commands */
struct esas2r_ioctl_fs {
	u8 version;
	#define ESAS2R_FS_VER       0
	u8 status;
	u8 driver_error;
	u8 adap_type;
	#define ESAS2R_FS_AT_ESASRAID2      3
	#define ESAS2R_FS_AT_TSSASRAID2     4
	#define ESAS2R_FS_AT_TSSASRAID2E    5
	#define ESAS2R_FS_AT_TLSASHBA       6
	u8 driver_ver;
	u8 reserved[11];
	struct esas2r_ioctlfs_command command;
	/*
	 * trailing variable-length payload.  NOTE(review): old-style [1]
	 * array rather than a C99 flexible member; sizeof() includes one
	 * byte of data and is part of the user ABI - do not change.
	 */
	u8 data[1];
};
522
/* persistent adapter configuration as stored in NVRAM */
struct esas2r_sas_nvram {
	u8 signature[4];
	u8 version;
	#define SASNVR_VERSION_0    0x00
	#define SASNVR_VERSION      SASNVR_VERSION_0
	u8 checksum;
	#define SASNVR_CKSUM_SEED   0x5A	/* seed for the NVRAM checksum */
	u8 max_lun_for_target;
	u8 pci_latency;
	#define SASNVR_PCILAT_DIS   0x00
	#define SASNVR_PCILAT_MIN   0x10
	#define SASNVR_PCILAT_MAX   0xF8
	u8 options1;		/* bit flags, SASNVR1_*: */
	#define SASNVR1_BOOT_DRVR   0x01
	#define SASNVR1_BOOT_SCAN   0x02
	#define SASNVR1_DIS_PCI_MWI 0x04
	#define SASNVR1_FORCE_ORD_Q 0x08
	#define SASNVR1_CACHELINE_0 0x10
	#define SASNVR1_DIS_DEVSORT 0x20
	#define SASNVR1_PWR_MGT_EN  0x40
	#define SASNVR1_WIDEPORT    0x80
	u8 options2;		/* bit flags, SASNVR2_*: */
	#define SASNVR2_SINGLE_BUS  0x01
	#define SASNVR2_SLOT_BIND   0x02
	#define SASNVR2_EXP_PROG    0x04
	#define SASNVR2_CMDTHR_LUN  0x08
	#define SASNVR2_HEARTBEAT   0x10
	#define SASNVR2_INT_CONNECT 0x20
	#define SASNVR2_SW_MUX_CTRL 0x40
	#define SASNVR2_DISABLE_NCQ 0x80
	u8 int_coalescing;	/* interrupt coalescing level */
	#define SASNVR_COAL_DIS     0x00
	#define SASNVR_COAL_LOW     0x01
	#define SASNVR_COAL_MED     0x02
	#define SASNVR_COAL_HI      0x03
	u8 cmd_throttle;
	#define SASNVR_CMDTHR_NONE  0x00
	u8 dev_wait_time;
	u8 dev_wait_count;
	u8 spin_up_delay;
	#define SASNVR_SPINUP_MAX   0x14
	u8 ssp_align_rate;
	u8 sas_addr[8];
	u8 phy_speed[16];	/* per-phy link speed setting: */
	#define SASNVR_SPEED_AUTO   0x00
	#define SASNVR_SPEED_1_5GB  0x01
	#define SASNVR_SPEED_3GB    0x02
	#define SASNVR_SPEED_6GB    0x03
	#define SASNVR_SPEED_12GB   0x04
	u8 phy_mux[16];		/* per-phy mux setting: */
	#define SASNVR_MUX_DISABLED 0x00
	#define SASNVR_MUX_1_5GB    0x01
	#define SASNVR_MUX_3GB      0x02
	#define SASNVR_MUX_6GB      0x03
	u8 phy_flags[16];	/* per-phy flags: */
	#define SASNVR_PHF_DISABLED 0x01
	#define SASNVR_PHF_RD_ONLY  0x02
	u8 sort_type;		/* how discovered devices are ordered */
	#define SASNVR_SORT_SAS_ADDR    0x00
	#define SASNVR_SORT_H308_CONN   0x01
	#define SASNVR_SORT_PHY_ID      0x02
	#define SASNVR_SORT_SLOT_ID     0x03
	u8 dpm_reqcmd_lmt;
	u8 dpm_stndby_time;
	u8 dpm_active_time;
	u8 phy_target_id[16];
	#define SASNVR_PTI_DISABLED     0xFF
	u8 virt_ses_mode;
	#define SASNVR_VSMH_DISABLED    0x00
	u8 read_write_mode;
	#define SASNVR_RWM_DEFAULT      0x00
	u8 link_down_to;	/* link-down timeout */
	u8 reserved[0xA1];	/* pads the structure to its fixed NVRAM size */
};
597
/* returns the length of a contiguous run and its physical address via *addr */
typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr);

/*
 * state carried while building a scatter/gather list for one request.
 * exactly one member of the 'sge' union is active, selected by the
 * build routine in use (64-bit SGEs vs. physical region descriptors).
 */
struct esas2r_sg_context {
	struct esas2r_adapter *adapter;
	struct esas2r_request *first_req;
	u32 length;		/* remaining bytes to map */
	u8 *cur_offset;
	PGETPHYSADDR get_phys_addr;
	union {
		struct {
			struct atto_vda_sge *curr;
			struct atto_vda_sge *last;
			struct atto_vda_sge *limit;	/* first SGE slot that would overflow */
			struct atto_vda_sge *chain;
		} a64;
		struct {
			struct atto_physical_region_description *curr;
			struct atto_physical_region_description *chain;
			u32 sgl_max_cnt;
			u32 sge_cnt;
		} prd;
	} sge;
	struct scatterlist *cur_sgel;
	u8 *exp_offset;
	int num_sgel;
	int sgel_count;
};
625
/* per-target entry in the adapter's target database (targetdb) */
struct esas2r_target {
	u8 flags;
	#define TF_PASS_THRU    0x01	/* pass-through (non-RAID) device */
	#define TF_USED         0x02	/* entry is in use */
	u8 new_target_state;
	u8 target_state;
	u8 buffered_target_state;	/* all three state fields use TS_*: */
#define TS_NOT_PRESENT      0x00
#define TS_PRESENT          0x05
#define TS_LUN_CHANGE       0x06
#define TS_INVALID          0xFF
	u32 block_size;
	u32 inter_block;	/* interleave in blocks */
	u32 inter_byte;		/* interleave in bytes */
	u16 virt_targ_id;
	u16 phys_targ_id;
	u8 identifier_len;	/* valid bytes in identifier[] below */
	u64 sas_addr;
	u8 identifier[60];
	struct atto_vda_ae_lu lu_event;	/* last LU async event for this target */
};
647
/* driver-side tracking structure for one VDA request */
struct esas2r_request {
	struct list_head comp_list;	/* linkage on a completion list */
	struct list_head req_list;	/* linkage on active/defer/avail lists */
	union atto_vda_req *vrq;	/* the hardware VDA request itself */
	struct esas2r_mem_desc *vrq_md;
	union {
		void *data_buf;
		union atto_vda_rsp_data *vda_rsp_data;
	};
	u8 *sense_buf;
	struct list_head sg_table_head;
	struct esas2r_mem_desc *sg_table;
	u32 timeout;		/* milliseconds, except the sentinels: */
	#define RQ_TIMEOUT_S1   0xFFFFFFFF
	#define RQ_TIMEOUT_S2   0xFFFFFFFE
	#define RQ_MAX_TIMEOUT  0xFFFFFFFD
	u16 target_id;
	u8 req_type;
	#define RT_INI_REQ      0x01	/* normal initiator request */
	#define RT_DISC_REQ     0x02	/* discovery request */
	u8 sense_len;
	union atto_vda_func_rsp func_rsp;	/* copy of the firmware response */
	RQCALLBK comp_cb;	/* called on completion */
	RQCALLBK interrupt_cb;	/* optional interrupt-context callback */
	void *interrupt_cx;
	u8 flags;
	#define RF_1ST_IBLK_BASE    0x04
	#define RF_FAILURE_OK       0x08	/* don't log a failure of this request */
	u8 req_stat;
	u16 vda_req_sz;		/* size of the VDA request in dwords */
	#define RQ_SIZE_DEFAULT 0
	u64 lba;
	RQCALLBK aux_req_cb;
	void *aux_req_cx;
	u32 blk_len;
	u32 max_blk_len;
	union {
		struct scsi_cmnd *cmd;	/* originating SCSI command, if any */
		u8 *task_management_status_ptr;
	};
};
689
/* state for an in-progress flash (FM API) operation */
struct esas2r_flash_context {
	struct esas2r_flash_img *fi;	/* image being read or written */
	RQCALLBK interrupt_cb;
	u8 *sgc_offset;
	u8 *scratch;
	u32 fi_hdr_len;
	u8 task;		/* current step of the flash task sequence: */
	#define FMTSK_ERASE_BOOT    0
	#define FMTSK_WRTBIOS       1
	#define FMTSK_READBIOS      2
	#define FMTSK_WRTMAC        3
	#define FMTSK_READMAC       4
	#define FMTSK_WRTEFI        5
	#define FMTSK_READEFI       6
	#define FMTSK_WRTCFG        7
	#define FMTSK_READCFG       8
	u8 func;		/* VDA flash sub-function for the current step */
	u16 num_comps;
	u32 cmp_len;
	u32 flsh_addr;
	u32 curr_len;
	u8 comp_typ;		/* CH_IT_* component currently being processed */
	struct esas2r_sg_context sgc;
};
714
/* state machine context for device discovery */
struct esas2r_disc_context {
	u8 disc_evt;		/* event that triggered discovery: */
	#define DCDE_DEV_CHANGE     0x01
	#define DCDE_DEV_SCAN       0x02
	u8 state;		/* current discovery state: */
	#define DCS_DEV_RMV         0x00
	#define DCS_DEV_ADD         0x01
	#define DCS_BLOCK_DEV_SCAN  0x02
	#define DCS_RAID_GRP_INFO   0x03
	#define DCS_PART_INFO       0x04
	#define DCS_PT_DEV_INFO     0x05
	#define DCS_PT_DEV_ADDR     0x06
	#define DCS_DISC_DONE       0xFF
	u16 flags;
	#define DCF_DEV_CHANGE      0x0001
	#define DCF_DEV_SCAN        0x0002
	#define DCF_POLLED          0x8000	/* discovery is running polled (no interrupts) */
	u32 interleave;
	u32 block_size;
	u16 dev_ix;		/* index of the device currently being queried */
	u8 part_num;
	u8 raid_grp_ix;
	char raid_grp_name[16];
	struct esas2r_target *curr_targ;
	u16 curr_virt_id;
	u16 curr_phys_id;
	u8 scan_gen;		/* scan generation, to detect stale results */
	u8 dev_addr_type;
	u64 sas_addr;
};
745
/* descriptor for a driver-managed memory region (virtual + bus address) */
struct esas2r_mem_desc {
	struct list_head next_desc;
	void *virt_addr;
	u64 phys_addr;		/* bus address of the region */
	void *pad;
	void *esas2r_data;	/* private use by the allocating subsystem */
	u32 esas2r_param;
	u32 size;
};

/* classification of events delivered to the firmware event workqueue */
enum fw_event_type {
	fw_event_null,
	fw_event_lun_change,
	fw_event_present,
	fw_event_not_present,
	fw_event_vda_ae
};

/* wrapper used to hand a VDA async event to upper layers */
struct esas2r_vda_ae {
	u32 signature;		/* "ATTO" marker validating the event */
#define ESAS2R_VDA_EVENT_SIG  0x4154544F
	u8 bus_number;
	u8 devfn;
	u8 pad[2];
	union atto_vda_ae vda_ae;
};

/* one queued item of deferred firmware-event work */
struct esas2r_fw_event_work {
	struct list_head list;
	struct delayed_work work;
	struct esas2r_adapter *a;	/* owning adapter */
	enum fw_event_type type;
	u8 data[sizeof(struct esas2r_vda_ae)];	/* event payload copy */
};

/* state of a firmware image transfer through sysfs */
enum state {
	FW_INVALID_ST,
	FW_STATUS_ST,
	FW_COMMAND_ST
};

struct esas2r_firmware {
	enum state state;
	struct esas2r_flash_img header;
	u8 *data;
	u64 phys;		/* bus address of the data buffer */
	int orig_len;
	void *header_buff;
	u64 header_buff_phys;
};
796
/* per-adapter state: one instance for each esas2r HBA found */
struct esas2r_adapter {
	struct esas2r_target targetdb[ESAS2R_MAX_TARGETS];
	struct esas2r_target *targetdb_end;	/* one past the last targetdb entry */
	unsigned char *regs;		/* mapped register window */
	unsigned char *data_window;	/* mapped data window */
	/*
	 * NOTE(review): flags/flags2 are manipulated through bitops on a
	 * (volatile unsigned long *) cast - see esas2r_lock_set_flags().
	 */
	u32 volatile flags;
	#define AF_PORT_CHANGE      (u32)(0x00000001)
	#define AF_CHPRST_NEEDED    (u32)(0x00000004)
	#define AF_CHPRST_PENDING   (u32)(0x00000008)
	#define AF_CHPRST_DETECTED  (u32)(0x00000010)
	#define AF_BUSRST_NEEDED    (u32)(0x00000020)
	#define AF_BUSRST_PENDING   (u32)(0x00000040)
	#define AF_BUSRST_DETECTED  (u32)(0x00000080)
	#define AF_DISABLED         (u32)(0x00000100)
	#define AF_FLASH_LOCK       (u32)(0x00000200)
	#define AF_OS_RESET         (u32)(0x00002000)
	#define AF_FLASHING         (u32)(0x00004000)
	#define AF_POWER_MGT        (u32)(0x00008000)
	#define AF_NVR_VALID        (u32)(0x00010000)
	#define AF_DEGRADED_MODE    (u32)(0x00020000)
	#define AF_DISC_PENDING     (u32)(0x00040000)
	#define AF_TASKLET_SCHEDULED    (u32)(0x00080000)
	#define AF_HEARTBEAT        (u32)(0x00200000)
	#define AF_HEARTBEAT_ENB    (u32)(0x00400000)
	#define AF_NOT_PRESENT      (u32)(0x00800000)
	#define AF_CHPRST_STARTED   (u32)(0x01000000)
	#define AF_FIRST_INIT       (u32)(0x02000000)
	#define AF_POWER_DOWN       (u32)(0x04000000)
	#define AF_DISC_IN_PROG     (u32)(0x08000000)
	#define AF_COMM_LIST_TOGGLE (u32)(0x10000000)
	#define AF_LEGACY_SGE_MODE  (u32)(0x20000000)
	#define AF_DISC_POLLED      (u32)(0x40000000)
	u32 volatile flags2;
	#define AF2_SERIAL_FLASH    (u32)(0x00000001)
	#define AF2_DEV_SCAN        (u32)(0x00000002)
	#define AF2_DEV_CNT_OK      (u32)(0x00000004)
	#define AF2_COREDUMP_AVAIL  (u32)(0x00000008)
	#define AF2_COREDUMP_SAVED  (u32)(0x00000010)
	#define AF2_VDA_POWER_DOWN  (u32)(0x00000100)
	#define AF2_THUNDERLINK     (u32)(0x00000200)
	#define AF2_THUNDERBOLT     (u32)(0x00000400)
	#define AF2_INIT_DONE       (u32)(0x00000800)
	#define AF2_INT_PENDING     (u32)(0x00001000)
	#define AF2_TIMER_TICK      (u32)(0x00002000)
	#define AF2_IRQ_CLAIMED     (u32)(0x00004000)
	#define AF2_MSI_ENABLED     (u32)(0x00008000)
	atomic_t disable_cnt;
	atomic_t dis_ints_cnt;
	u32 int_stat;
	u32 int_mask;
	u32 volatile *outbound_copy;	/* firmware-updated copy pointer */
	struct list_head avail_request;
	spinlock_t request_lock;
	spinlock_t sg_list_lock;	/* protects free_sg_list_head */
	spinlock_t queue_lock;
	spinlock_t mem_lock;
	struct list_head free_sg_list_head;
	struct esas2r_mem_desc *sg_list_mds;
	struct list_head active_list;
	struct list_head defer_list;
	struct esas2r_request **req_table;	/* handle -> request lookup */
	union {
		u16 prev_dev_cnt;
		u32 heartbeat_time;
	#define ESAS2R_HEARTBEAT_TIME       (3000)
	};
	u32 chip_uptime;
	#define ESAS2R_CHP_UPTIME_MAX       (60000)
	#define ESAS2R_CHP_UPTIME_CNT       (20000)
	u64 uncached_phys;	/* bus address of the uncached area */
	u8 *uncached;
	struct esas2r_sas_nvram *nvram;
	struct esas2r_request general_req;	/* request reserved for internal use */
	u8 init_msg;		/* current step of the init message exchange */
	#define ESAS2R_INIT_MSG_START       1
	#define ESAS2R_INIT_MSG_INIT        2
	#define ESAS2R_INIT_MSG_GET_INIT    3
	#define ESAS2R_INIT_MSG_REINIT      4
	u16 cmd_ref_no;		/* rolling reference number for request handles */
	u32 fw_version;
	u32 fw_build;
	u32 chip_init_time;
	#define ESAS2R_CHPRST_TIME         (180000)
	#define ESAS2R_CHPRST_WAIT_TIME    (2000)
	u32 last_tick_time;
	u32 window_base;	/* current data-window base address */
	RQBUILDSGL build_sgl;	/* SGE- or PRD-mode SGL builder for this adapter */
	struct esas2r_request *first_ae_req;
	u32 list_size;
	u32 last_write;
	u32 last_read;
	u16 max_vdareq_size;
	u16 disc_wait_cnt;
	struct esas2r_mem_desc inbound_list_md;
	struct esas2r_mem_desc outbound_list_md;
	struct esas2r_disc_context disc_ctx;
	u8 *disc_buffer;
	u32 disc_start_time;
	u32 disc_wait_time;
	u32 flash_ver;
	char flash_rev[16];
	char fw_rev[16];
	char image_type[16];
	struct esas2r_flash_context flash_context;
	u32 num_targets_backend;
	u32 ioctl_tunnel;
	struct tasklet_struct tasklet;
	struct pci_dev *pcid;
	struct Scsi_Host *host;
	unsigned int index;	/* adapter number, used in naming/kill */
	char name[32];
	struct timer_list timer;
	struct esas2r_firmware firmware;
	/* waiter/done pairs for the synchronous ioctl paths: */
	wait_queue_head_t nvram_waiter;
	int nvram_command_done;
	wait_queue_head_t fm_api_waiter;
	int fm_api_command_done;
	wait_queue_head_t vda_waiter;
	int vda_command_done;
	u8 *vda_buffer;
	u64 ppvda_buffer;	/* bus address of vda_buffer */
#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data))
#define VDA_MAX_BUFFER_SIZE  (0x40000 + VDA_BUFFER_HEADER_SZ)
	wait_queue_head_t fs_api_waiter;
	int fs_api_command_done;
	u64 ppfs_api_buffer;	/* bus address of fs_api_buffer */
	u8 *fs_api_buffer;
	u32 fs_api_buffer_size;
	wait_queue_head_t buffered_ioctl_waiter;
	int buffered_ioctl_done;
	int uncached_size;
	struct workqueue_struct *fw_event_q;
	struct list_head fw_event_list;
	spinlock_t fw_event_lock;
	u8 fw_events_off; /* if '1', then ignore events */
	char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
	/*
	 * intr_mode stores the interrupt mode currently being used by this
	 * adapter. it is based on the interrupt_mode module parameter, but
	 * can be changed based on the ability (or not) to utilize the
	 * mode requested by the parameter.
	 */
	int intr_mode;
#define INTR_MODE_LEGACY 0
#define INTR_MODE_MSI    1
#define INTR_MODE_MSIX   2
	struct esas2r_sg_context fm_api_sgc;
	u8 *save_offset;
	struct list_head vrq_mds_head;
	struct esas2r_mem_desc *vrq_mds;
	int num_vrqs;
	struct semaphore fm_api_semaphore;
	struct semaphore fs_api_semaphore;
	struct semaphore nvram_semaphore;
	struct atto_ioctl *local_atto_ioctl;
	u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];
	/* which sysfs binary attributes were successfully created: */
	unsigned int sysfs_fw_created:1;
	unsigned int sysfs_fs_created:1;
	unsigned int sysfs_vda_created:1;
	unsigned int sysfs_hw_created:1;
	unsigned int sysfs_live_nvram_created:1;
	unsigned int sysfs_default_nvram_created:1;
};
960
961/*
962 * Function Declarations
963 * SCSI functions
964 */
965int esas2r_release(struct Scsi_Host *);
966const char *esas2r_info(struct Scsi_Host *);
967int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
968 struct esas2r_sas_nvram *data);
969int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg);
970int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
971u8 handle_hba_ioctl(struct esas2r_adapter *a,
972 struct atto_ioctl *ioctl_hba);
973int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
974int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
975int esas2r_slave_alloc(struct scsi_device *dev);
976int esas2r_slave_configure(struct scsi_device *dev);
977void esas2r_slave_destroy(struct scsi_device *dev);
978int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason);
979int esas2r_change_queue_type(struct scsi_device *dev, int type);
980long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
981
982/* SCSI error handler (eh) functions */
983int esas2r_eh_abort(struct scsi_cmnd *cmd);
984int esas2r_device_reset(struct scsi_cmnd *cmd);
985int esas2r_host_reset(struct scsi_cmnd *cmd);
986int esas2r_bus_reset(struct scsi_cmnd *cmd);
987int esas2r_target_reset(struct scsi_cmnd *cmd);
988
989/* Internal functions */
990int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
991 int index);
992int esas2r_cleanup(struct Scsi_Host *host);
993int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count);
994int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
995 int count);
996int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count);
997int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
998 int count);
999int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count);
1000int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
1001 int count);
1002void esas2r_adapter_tasklet(unsigned long context);
1003irqreturn_t esas2r_interrupt(int irq, void *dev_id);
1004irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id);
1005void esas2r_kickoff_timer(struct esas2r_adapter *a);
1006int esas2r_suspend(struct pci_dev *pcid, pm_message_t state);
1007int esas2r_resume(struct pci_dev *pcid);
1008void esas2r_fw_event_off(struct esas2r_adapter *a);
1009void esas2r_fw_event_on(struct esas2r_adapter *a);
1010bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
1011 struct esas2r_sas_nvram *nvram);
1012void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
1013 struct esas2r_sas_nvram *nvram);
1014void esas2r_complete_request_cb(struct esas2r_adapter *a,
1015 struct esas2r_request *rq);
1016void esas2r_reset_detected(struct esas2r_adapter *a);
1017void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id,
1018 u8 state);
1019int esas2r_req_status_to_error(u8 req_stat);
1020void esas2r_kill_adapter(int i);
1021void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1022struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a);
1023u32 esas2r_get_uncached_size(struct esas2r_adapter *a);
1024bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
1025 void **uncached_area);
1026bool esas2r_check_adapter(struct esas2r_adapter *a);
1027bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll);
1028void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1029bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
1030 struct esas2r_request *rqaux, u8 task_mgt_func);
1031void esas2r_do_tasklet_tasks(struct esas2r_adapter *a);
1032void esas2r_adapter_interrupt(struct esas2r_adapter *a);
1033void esas2r_do_deferred_processes(struct esas2r_adapter *a);
1034void esas2r_reset_bus(struct esas2r_adapter *a);
1035void esas2r_reset_adapter(struct esas2r_adapter *a);
1036void esas2r_timer_tick(struct esas2r_adapter *a);
1037const char *esas2r_get_model_name(struct esas2r_adapter *a);
1038const char *esas2r_get_model_name_short(struct esas2r_adapter *a);
1039u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time,
1040 u32 *delay);
1041void esas2r_build_flash_req(struct esas2r_adapter *a,
1042 struct esas2r_request *rq,
1043 u8 sub_func,
1044 u8 cksum,
1045 u32 addr,
1046 u32 length);
1047void esas2r_build_mgt_req(struct esas2r_adapter *a,
1048 struct esas2r_request *rq,
1049 u8 sub_func,
1050 u8 scan_gen,
1051 u16 dev_index,
1052 u32 length,
1053 void *data);
1054void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
1055void esas2r_build_cli_req(struct esas2r_adapter *a,
1056 struct esas2r_request *rq,
1057 u32 length,
1058 u32 cmd_rsp_len);
1059void esas2r_build_ioctl_req(struct esas2r_adapter *a,
1060 struct esas2r_request *rq,
1061 u32 length,
1062 u8 sub_func);
1063void esas2r_build_cfg_req(struct esas2r_adapter *a,
1064 struct esas2r_request *rq,
1065 u8 sub_func,
1066 u32 length,
1067 void *data);
1068void esas2r_power_down(struct esas2r_adapter *a);
1069bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll);
1070void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1071u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo);
1072bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
1073 struct esas2r_ioctl_fs *fs,
1074 struct esas2r_request *rq,
1075 struct esas2r_sg_context *sgc);
1076bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from,
1077 u32 size);
1078bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from,
1079 u32 size);
1080bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
1081 struct esas2r_request *rq, struct esas2r_sg_context *sgc);
1082void esas2r_force_interrupt(struct esas2r_adapter *a);
1083void esas2r_local_start_request(struct esas2r_adapter *a,
1084 struct esas2r_request *rq);
1085void esas2r_process_adapter_reset(struct esas2r_adapter *a);
1086void esas2r_complete_request(struct esas2r_adapter *a,
1087 struct esas2r_request *rq);
1088void esas2r_dummy_complete(struct esas2r_adapter *a,
1089 struct esas2r_request *rq);
1090void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq);
1091void esas2r_start_vda_request(struct esas2r_adapter *a,
1092 struct esas2r_request *rq);
1093bool esas2r_read_flash_rev(struct esas2r_adapter *a);
1094bool esas2r_read_image_type(struct esas2r_adapter *a);
1095bool esas2r_nvram_read_direct(struct esas2r_adapter *a);
1096bool esas2r_nvram_validate(struct esas2r_adapter *a);
1097void esas2r_nvram_set_defaults(struct esas2r_adapter *a);
1098bool esas2r_print_flash_rev(struct esas2r_adapter *a);
1099void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt);
1100bool esas2r_init_msgs(struct esas2r_adapter *a);
1101bool esas2r_is_adapter_present(struct esas2r_adapter *a);
1102void esas2r_nuxi_mgt_data(u8 function, void *data);
1103void esas2r_nuxi_cfg_data(u8 function, void *data);
1104void esas2r_nuxi_ae_data(union atto_vda_ae *ae);
1105void esas2r_reset_chip(struct esas2r_adapter *a);
1106void esas2r_log_request_failure(struct esas2r_adapter *a,
1107 struct esas2r_request *rq);
1108void esas2r_polled_interrupt(struct esas2r_adapter *a);
1109bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
1110 u8 status);
1111bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
1112 struct esas2r_sg_context *sgc);
1113bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
1114 struct esas2r_sg_context *sgc);
1115void esas2r_targ_db_initialize(struct esas2r_adapter *a);
1116void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify);
1117void esas2r_targ_db_report_changes(struct esas2r_adapter *a);
1118struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
1119 struct esas2r_disc_context *dc);
1120struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
1121 struct esas2r_disc_context *dc,
1122 u8 *ident,
1123 u8 ident_len);
1124void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t);
1125struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
1126 u64 *sas_addr);
1127struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
1128 void *identifier,
1129 u8 ident_len);
1130u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id);
1131struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
1132 u16 virt_id);
1133u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a);
1134void esas2r_disc_initialize(struct esas2r_adapter *a);
1135void esas2r_disc_start_waiting(struct esas2r_adapter *a);
1136void esas2r_disc_check_for_work(struct esas2r_adapter *a);
1137void esas2r_disc_check_complete(struct esas2r_adapter *a);
1138void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt);
1139bool esas2r_disc_start_port(struct esas2r_adapter *a);
1140void esas2r_disc_local_start_request(struct esas2r_adapter *a,
1141 struct esas2r_request *rq);
1142bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str);
1143bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
1144 struct atto_ioctl_vda *vi,
1145 struct esas2r_request *rq,
1146 struct esas2r_sg_context *sgc);
1147void esas2r_queue_fw_event(struct esas2r_adapter *a,
1148 enum fw_event_type type,
1149 void *data,
1150 int data_sz);
1151
1152/* Inline functions */
/*
 * Atomically set one flag bit in a 32-bit flags word, returning nonzero
 * if it was already set (test_and_set_bit semantics).  'bits' must have
 * exactly one bit set: ilog2() reduces it to a single bit position, so a
 * multi-bit mask would silently operate on only its highest bit.
 *
 * NOTE(review): the cast of u32 * to unsigned long * makes the bitop
 * span 8 bytes on 64-bit platforms and index from the opposite end on
 * big-endian machines - presumably tolerated by the adjacent layout of
 * flags/flags2, but confirm before reusing this pattern.
 */
static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits)
{
	return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags);
}
1157
/*
 * Atomically clear one flag bit in a 32-bit flags word, returning
 * nonzero if it was previously set.  Same single-bit requirement and
 * 64-bit/big-endian caveat as esas2r_lock_set_flags().
 */
static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits)
{
	return test_and_clear_bit(ilog2(bits),
				  (volatile unsigned long *)flags);
}
1163
1164/* Allocate a chip scatter/gather list entry */
1165static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
1166{
1167 unsigned long flags;
1168 struct list_head *sgl;
1169 struct esas2r_mem_desc *result = NULL;
1170
1171 spin_lock_irqsave(&a->sg_list_lock, flags);
1172 if (likely(!list_empty(&a->free_sg_list_head))) {
1173 sgl = a->free_sg_list_head.next;
1174 result = list_entry(sgl, struct esas2r_mem_desc, next_desc);
1175 list_del_init(sgl);
1176 }
1177 spin_unlock_irqrestore(&a->sg_list_lock, flags);
1178
1179 return result;
1180}
1181
1182/* Initialize a scatter/gather context */
1183static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc,
1184 struct esas2r_adapter *a,
1185 struct esas2r_request *rq,
1186 struct atto_vda_sge *first)
1187{
1188 sgc->adapter = a;
1189 sgc->first_req = rq;
1190
1191 /*
1192 * set the limit pointer such that an SGE pointer above this value
1193 * would be the first one to overflow the SGL.
1194 */
1195 sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq
1196 + (sizeof(union
1197 atto_vda_req) /
1198 8)
1199 - sizeof(struct
1200 atto_vda_sge));
1201 if (first) {
1202 sgc->sge.a64.last =
1203 sgc->sge.a64.curr = first;
1204 rq->vrq->scsi.sg_list_offset = (u8)
1205 ((u8 *)first -
1206 (u8 *)rq->vrq);
1207 } else {
1208 sgc->sge.a64.last =
1209 sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
1210 rq->vrq->scsi.sg_list_offset =
1211 (u8)offsetof(struct atto_vda_scsi_req, u.sge);
1212 }
1213 sgc->sge.a64.chain = NULL;
1214}
1215
/*
 * (Re)initialize a request as a SCSI VDA request: reset bookkeeping,
 * register it in the adapter's handle->request table, make the handle
 * unique, and pre-format the VDA SCSI header.  Callers may override
 * fields afterwards for other request types.
 */
static inline void esas2r_rq_init_request(struct esas2r_request *rq,
					  struct esas2r_adapter *a)
{
	union atto_vda_req *vrq = rq->vrq;
	u32 handle;

	INIT_LIST_HEAD(&rq->sg_table_head);
	/* data payload lives immediately after the VDA request itself */
	rq->data_buf = (void *)(vrq + 1);
	rq->interrupt_cb = NULL;
	rq->comp_cb = esas2r_complete_request_cb;
	rq->flags = 0;
	rq->timeout = 0;
	rq->req_stat = RS_PENDING;
	rq->req_type = RT_INI_REQ;

	/* clear the outbound response */
	rq->func_rsp.dwords[0] = 0;
	rq->func_rsp.dwords[1] = 0;

	/*
	 * clear the size of the VDA request. esas2r_build_sg_list() will
	 * only allow the size of the request to grow. there are some
	 * management requests that go through there twice and the second
	 * time through sets a smaller request size. if this is not modified
	 * at all we'll set it to the size of the entire VDA request.
	 */
	rq->vda_req_sz = RQ_SIZE_DEFAULT;

	/* req_table entry should be NULL at this point - if not, halt */

	if (a->req_table[LOWORD(vrq->scsi.handle)])
		esas2r_bugon();

	/* fill in the table for this handle so we can get back to the
	 * request.
	 * NOTE(review): the table index uses LOWORD() on the raw
	 * (big-endian) handle while the reference-number math below
	 * byte-swaps it -- verify the two views agree on all endiannesses.
	 */
	a->req_table[LOWORD(vrq->scsi.handle)] = rq;

	/*
	 * add a reference number to the handle to make it unique (until it
	 * wraps of course) while preserving the upper word
	 */

	handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000;
	vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++);

	/*
	 * the following formats a SCSI request. the caller can override as
	 * necessary. clear_vda_request can be called to clear the VDA
	 * request for another type of request.
	 */
	vrq->scsi.function = VDA_FUNC_SCSI;
	vrq->scsi.sense_len = SENSE_DATA_SZ;

	/* clear out sg_list_offset and chain_offset */
	vrq->scsi.sg_list_offset = 0;
	vrq->scsi.chain_offset = 0;
	vrq->scsi.flags = 0;
	vrq->scsi.reserved = 0;

	/* set the sense buffer to be the data payload buffer */
	vrq->scsi.ppsense_buf
		= cpu_to_le64(rq->vrq_md->phys_addr +
			      sizeof(union atto_vda_req));
}
1281
1282static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
1283 struct esas2r_adapter *a)
1284{
1285 unsigned long flags;
1286
1287 if (list_empty(&rq->sg_table_head))
1288 return;
1289
1290 spin_lock_irqsave(&a->sg_list_lock, flags);
1291 list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head);
1292 spin_unlock_irqrestore(&a->sg_list_lock, flags);
1293}
1294
/*
 * Tear down a request: give back its S/G tables, drop it from the
 * handle->request table, and detach the data buffer pointer.
 */
static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
					     struct esas2r_adapter *a)

{
	esas2r_rq_free_sg_lists(rq, a);
	/* undo the table registration done by esas2r_rq_init_request() */
	a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL;
	rq->data_buf = NULL;
}
1303
1304static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
1305{
1306 return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED
1307 | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED
1308 | AF_PORT_CHANGE))
1309 ? true : false;
1310}
1311
1312/*
1313 * Build the scatter/gather list for an I/O request according to the
1314 * specifications placed in the esas2r_sg_context. The caller must initialize
1315 * struct esas2r_sg_context prior to the initial call by calling
1316 * esas2r_sgc_init()
1317 */
1318static inline bool esas2r_build_sg_list(struct esas2r_adapter *a,
1319 struct esas2r_request *rq,
1320 struct esas2r_sg_context *sgc)
1321{
1322 if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0))
1323 return true;
1324
1325 return (*a->build_sgl)(a, sgc);
1326}
1327
1328static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a)
1329{
1330 if (atomic_inc_return(&a->dis_ints_cnt) == 1)
1331 esas2r_write_register_dword(a, MU_INT_MASK_OUT,
1332 ESAS2R_INT_DIS_MASK);
1333}
1334
1335static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
1336{
1337 if (atomic_dec_return(&a->dis_ints_cnt) == 0)
1338 esas2r_write_register_dword(a, MU_INT_MASK_OUT,
1339 ESAS2R_INT_ENB_MASK);
1340}
1341
1342/* Schedule a TASKLET to perform non-interrupt tasks that may require delays
1343 * or long completion times.
1344 */
1345static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
1346{
1347 /* make sure we don't schedule twice */
1348 if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) &
1349 ilog2(AF_TASKLET_SCHEDULED)))
1350 tasklet_hi_schedule(&a->tasklet);
1351}
1352
1353static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
1354{
1355 if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING))
1356 && (a->nvram->options2 & SASNVR2_HEARTBEAT))
1357 esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB);
1358 else
1359 esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
1360}
1361
/*
 * Turn the heartbeat off: clear both the enable flag and any
 * outstanding heartbeat-in-flight flag.
 */
static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
{
	esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
	esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
}
1367
/* Set the initial state for resetting the adapter on the next pass through
 * esas2r_do_deferred.
 */
static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
{
	/* stop the heartbeat before tearing the chip down */
	esas2r_disable_heartbeat(a);

	/* chip reset needed/pending plus a rediscovery once it completes */
	esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED);
	esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
	esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
}
1379
/* See if an interrupt is pending on the adapter.
 *
 * Side effects on a hit: chip interrupts are disabled, the raw status
 * is latched into a->int_stat, and a->int_mask is zeroed (which also
 * makes subsequent calls return false until the mask is restored).
 */
static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a)
{
	u32 intstat;

	/* a zero mask means interrupts are currently being ignored */
	if (a->int_mask == 0)
		return false;

	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);

	if ((intstat & a->int_mask) == 0)
		return false;

	esas2r_disable_chip_interrupts(a);

	a->int_stat = intstat;
	a->int_mask = 0;

	return true;
}
1400
1401static inline u16 esas2r_targ_get_id(struct esas2r_target *t,
1402 struct esas2r_adapter *a)
1403{
1404 return (u16)(uintptr_t)(t - a->targetdb);
1405}
1406
1407/* Build and start an asynchronous event request */
1408static inline void esas2r_start_ae_request(struct esas2r_adapter *a,
1409 struct esas2r_request *rq)
1410{
1411 unsigned long flags;
1412
1413 esas2r_build_ae_req(a, rq);
1414
1415 spin_lock_irqsave(&a->queue_lock, flags);
1416 esas2r_start_vda_request(a, rq);
1417 spin_unlock_irqrestore(&a->queue_lock, flags);
1418}
1419
1420static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
1421 struct list_head *comp_list)
1422{
1423 struct esas2r_request *rq;
1424 struct list_head *element, *next;
1425
1426 list_for_each_safe(element, next, comp_list) {
1427 rq = list_entry(element, struct esas2r_request, comp_list);
1428 list_del_init(element);
1429 esas2r_complete_request(a, rq);
1430 }
1431}
1432
1433/* sysfs handlers */
1434extern struct bin_attribute bin_attr_fw;
1435extern struct bin_attribute bin_attr_fs;
1436extern struct bin_attribute bin_attr_vda;
1437extern struct bin_attribute bin_attr_hw;
1438extern struct bin_attribute bin_attr_live_nvram;
1439extern struct bin_attribute bin_attr_default_nvram;
1440
1441#endif /* ESAS2R_H */
diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c
new file mode 100644
index 000000000000..dec6c334ce3e
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_disc.c
@@ -0,0 +1,1189 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_disc.c
3 * esas2r device discovery routines
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 */
8/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
9/*
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29 *
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 *
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42 */
43/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
44
45#include "esas2r.h"
46
47/* Miscellaneous internal discovery routines */
48static void esas2r_disc_abort(struct esas2r_adapter *a,
49 struct esas2r_request *rq);
50static bool esas2r_disc_continue(struct esas2r_adapter *a,
51 struct esas2r_request *rq);
52static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
53static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
54static bool esas2r_disc_start_request(struct esas2r_adapter *a,
55 struct esas2r_request *rq);
56
57/* Internal discovery routines that process the states */
58static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
59 struct esas2r_request *rq);
60static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
61 struct esas2r_request *rq);
62static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
63 struct esas2r_request *rq);
64static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
65 struct esas2r_request *rq);
66static bool esas2r_disc_part_info(struct esas2r_adapter *a,
67 struct esas2r_request *rq);
68static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
69 struct esas2r_request *rq);
70static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
71 struct esas2r_request *rq);
72static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
73 struct esas2r_request *rq);
74static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
75 struct esas2r_request *rq);
76static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
77 struct esas2r_request *rq);
78static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
79 struct esas2r_request *rq);
80static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
81 struct esas2r_request *rq);
82
/*
 * Reset discovery state and compute how long (and for how many devices)
 * to wait before declaring discovery complete.  Wait parameters come
 * from NVRAM; after a chip reset or power-management event the
 * previously discovered device count overrides them.
 */
void esas2r_disc_initialize(struct esas2r_adapter *a)
{
	struct esas2r_sas_nvram *nvr = a->nvram;

	esas2r_trace_enter();

	/* clean slate: no discovery in progress, no scan done yet */
	esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
	esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN);
	esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK);

	/* NVRAM wait time is in seconds; convert to milliseconds */
	a->disc_start_time = jiffies_to_msecs(jiffies);
	a->disc_wait_time = nvr->dev_wait_time * 1000;
	a->disc_wait_cnt = nvr->dev_wait_count;

	if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
		a->disc_wait_cnt = ESAS2R_MAX_TARGETS;

	/*
	 * If we are doing chip reset or power management processing, always
	 * wait for devices. use the NVRAM device count if it is greater than
	 * previously discovered devices.
	 */

	esas2r_hdebug("starting discovery...");

	a->general_req.interrupt_cx = NULL;

	if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) {
		if (a->prev_dev_cnt == 0) {
			/* Don't bother waiting if there is nothing to wait
			 * for.
			 */
			a->disc_wait_time = 0;
		} else {
			/*
			 * Set the device wait count to what was previously
			 * found. We don't care if the user only configured
			 * a time because we know the exact count to wait for.
			 * There is no need to honor the user's wishes to
			 * always wait the full time.
			 */
			a->disc_wait_cnt = a->prev_dev_cnt;

			/*
			 * bump the minimum wait time to 15 seconds since the
			 * default is 3 (system boot or the boot driver usually
			 * buys us more time).
			 */
			if (a->disc_wait_time < 15000)
				a->disc_wait_time = 15000;
		}
	}

	esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
	esas2r_trace("disc wait time: %d", a->disc_wait_time);

	/* no wait configured at all: complete the wait phase immediately */
	if (a->disc_wait_time == 0)
		esas2r_disc_check_complete(a);

	esas2r_trace_exit();
}
144
145void esas2r_disc_start_waiting(struct esas2r_adapter *a)
146{
147 unsigned long flags;
148
149 spin_lock_irqsave(&a->mem_lock, flags);
150
151 if (a->disc_ctx.disc_evt)
152 esas2r_disc_start_port(a);
153
154 spin_unlock_irqrestore(&a->mem_lock, flags);
155}
156
/*
 * Polled-mode discovery driver: service pending interrupts, start any
 * queued discovery event, then wait on / advance the active discovery
 * request.  A timed-out request triggers a local adapter reset.
 */
void esas2r_disc_check_for_work(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;

	/* service any pending interrupts first */

	esas2r_polled_interrupt(a);

	/*
	 * now, interrupt processing may have queued up a discovery event. go
	 * see if we have one to start. we couldn't start it in the ISR since
	 * polled discovery would cause a deadlock.
	 */

	esas2r_disc_start_waiting(a);

	/* no discovery context attached means nothing is in flight */
	if (rq->interrupt_cx == NULL)
		return;

	if (rq->req_stat == RS_STARTED
	    && rq->timeout <= RQ_MAX_TIMEOUT) {
		/* wait for the current discovery request to complete. */
		esas2r_wait_request(a, rq);

		if (rq->req_stat == RS_TIMEOUT) {
			/* firmware stopped responding: abort discovery and
			 * schedule a chip reset */
			esas2r_disc_abort(a, rq);
			esas2r_local_reset_adapter(a);
			return;
		}
	}

	/* still in flight (interrupt-driven completion pending) */
	if (rq->req_stat == RS_PENDING
	    || rq->req_stat == RS_STARTED)
		return;

	esas2r_disc_continue(a, rq);
}
194
195void esas2r_disc_check_complete(struct esas2r_adapter *a)
196{
197 unsigned long flags;
198
199 esas2r_trace_enter();
200
201 /* check to see if we should be waiting for devices */
202 if (a->disc_wait_time) {
203 u32 currtime = jiffies_to_msecs(jiffies);
204 u32 time = currtime - a->disc_start_time;
205
206 /*
207 * Wait until the device wait time is exhausted or the device
208 * wait count is satisfied.
209 */
210 if (time < a->disc_wait_time
211 && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
212 || a->disc_wait_cnt == 0)) {
213 /* After three seconds of waiting, schedule a scan. */
214 if (time >= 3000
215 && !(esas2r_lock_set_flags(&a->flags2,
216 AF2_DEV_SCAN) &
217 ilog2(AF2_DEV_SCAN))) {
218 spin_lock_irqsave(&a->mem_lock, flags);
219 esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
220 spin_unlock_irqrestore(&a->mem_lock, flags);
221 }
222
223 esas2r_trace_exit();
224 return;
225 }
226
227 /*
228 * We are done waiting...we think. Adjust the wait time to
229 * consume events after the count is met.
230 */
231 if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK)
232 & ilog2(AF2_DEV_CNT_OK)))
233 a->disc_wait_time = time + 3000;
234
235 /* If we haven't done a full scan yet, do it now. */
236 if (!(esas2r_lock_set_flags(&a->flags2,
237 AF2_DEV_SCAN) &
238 ilog2(AF2_DEV_SCAN))) {
239 spin_lock_irqsave(&a->mem_lock, flags);
240 esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
241 spin_unlock_irqrestore(&a->mem_lock, flags);
242
243 esas2r_trace_exit();
244 return;
245 }
246
247 /*
248 * Now, if there is still time left to consume events, continue
249 * waiting.
250 */
251 if (time < a->disc_wait_time) {
252 esas2r_trace_exit();
253 return;
254 }
255 } else {
256 if (!(esas2r_lock_set_flags(&a->flags2,
257 AF2_DEV_SCAN) &
258 ilog2(AF2_DEV_SCAN))) {
259 spin_lock_irqsave(&a->mem_lock, flags);
260 esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
261 spin_unlock_irqrestore(&a->mem_lock, flags);
262 }
263 }
264
265 /* We want to stop waiting for devices. */
266 a->disc_wait_time = 0;
267
268 if ((a->flags & AF_DISC_POLLED)
269 && (a->flags & AF_DISC_IN_PROG)) {
270 /*
271 * Polled discovery is still pending so continue the active
272 * discovery until it is done. At that point, we will stop
273 * polled discovery and transition to interrupt driven
274 * discovery.
275 */
276 } else {
277 /*
278 * Done waiting for devices. Note that we get here immediately
279 * after deferred waiting completes because that is interrupt
280 * driven; i.e. There is no transition.
281 */
282 esas2r_disc_fix_curr_requests(a);
283 esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
284
285 /*
286 * We have deferred target state changes until now because we
287 * don't want to report any removals (due to the first arrival)
288 * until the device wait time expires.
289 */
290 esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
291 }
292
293 esas2r_trace_exit();
294}
295
/*
 * OR a discovery event into the pending-event mask and start discovery
 * immediately unless a chip reset is pending or we are in polled mode
 * (callers hold a->mem_lock -- see esas2r_disc_start_waiting() and
 * esas2r_disc_check_complete()).
 */
void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
{
	struct esas2r_disc_context *dc = &a->disc_ctx;

	esas2r_trace_enter();

	esas2r_trace("disc_event: %d", disc_evt);

	/* Initialize the discovery context */
	dc->disc_evt |= disc_evt;

	/*
	 * Don't start discovery before or during polled discovery. if we did,
	 * we would have a deadlock if we are in the ISR already.
	 */
	if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED)))
		esas2r_disc_start_port(a);

	esas2r_trace_exit();
}
316
/*
 * Begin processing the next queued discovery event.  Returns true when
 * discovery was started (or left for the poller in polled mode), false
 * when discovery is already running, polled mode declines, or there is
 * nothing left to do (in which case AF_PORT_CHANGE is raised).
 */
bool esas2r_disc_start_port(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	struct esas2r_disc_context *dc = &a->disc_ctx;
	bool ret;

	esas2r_trace_enter();

	/* only one discovery can be active at a time */
	if (a->flags & AF_DISC_IN_PROG) {
		esas2r_trace_exit();

		return false;
	}

	/* If there is a discovery waiting, process it. */
	if (dc->disc_evt) {
		if ((a->flags & AF_DISC_POLLED)
		    && a->disc_wait_time == 0) {
			/*
			 * We are doing polled discovery, but we no longer want
			 * to wait for devices. Stop polled discovery and
			 * transition to interrupt driven discovery.
			 */

			esas2r_trace_exit();

			return false;
		}
	} else {
		/* Discovery is complete. */

		esas2r_hdebug("disc done");

		esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);

		esas2r_trace_exit();

		return false;
	}

	/* Handle the discovery context */
	esas2r_trace("disc_evt: %d", dc->disc_evt);
	esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG);
	dc->flags = 0;

	if (a->flags & AF_DISC_POLLED)
		dc->flags |= DCF_POLLED;

	rq->interrupt_cx = dc;
	rq->req_stat = RS_SUCCESS;

	/* Decode the event code
	 * (scan takes precedence; a change event is left queued for the
	 * next pass)
	 */
	if (dc->disc_evt & DCDE_DEV_SCAN) {
		dc->disc_evt &= ~DCDE_DEV_SCAN;

		dc->flags |= DCF_DEV_SCAN;
		dc->state = DCS_BLOCK_DEV_SCAN;
	} else if (dc->disc_evt & DCDE_DEV_CHANGE) {
		dc->disc_evt &= ~DCDE_DEV_CHANGE;

		dc->flags |= DCF_DEV_CHANGE;
		dc->state = DCS_DEV_RMV;
	}

	/* Continue interrupt driven discovery */
	if (!(a->flags & AF_DISC_POLLED))
		ret = esas2r_disc_continue(a, rq);
	else
		ret = true;

	esas2r_trace_exit();

	return ret;
}
391
/*
 * Discovery state machine.  Runs states until one starts an async
 * request (handler returns true) or the machine reaches DCS_DISC_DONE,
 * at which point the context is detached and the next queued discovery
 * (if any) is started via esas2r_disc_start_port().
 */
static bool esas2r_disc_continue(struct esas2r_adapter *a,
				 struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;

	/* Device discovery/removal */
	while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
		rslt = false;

		switch (dc->state) {
		case DCS_DEV_RMV:

			rslt = esas2r_disc_dev_remove(a, rq);
			break;

		case DCS_DEV_ADD:

			rslt = esas2r_disc_dev_add(a, rq);
			break;

		case DCS_BLOCK_DEV_SCAN:

			rslt = esas2r_disc_block_dev_scan(a, rq);
			break;

		case DCS_RAID_GRP_INFO:

			rslt = esas2r_disc_raid_grp_info(a, rq);
			break;

		case DCS_PART_INFO:

			rslt = esas2r_disc_part_info(a, rq);
			break;

		case DCS_PT_DEV_INFO:

			rslt = esas2r_disc_passthru_dev_info(a, rq);
			break;
		case DCS_PT_DEV_ADDR:

			rslt = esas2r_disc_passthru_dev_addr(a, rq);
			break;
		case DCS_DISC_DONE:

			/* terminate the loop by clearing the work flags */
			dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
			break;

		default:

			/* unknown state: halt (debug) and bail out cleanly */
			esas2r_bugon();
			dc->state = DCS_DISC_DONE;
			break;
		}

		/* a handler returning true has a request in flight;
		 * its completion callback will re-enter this function */
		if (rslt)
			return true;
	}

	/* Discovery is done...for now. */
	rq->interrupt_cx = NULL;

	if (!(a->flags & AF_DISC_PENDING))
		esas2r_disc_fix_curr_requests(a);

	esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);

	/* Start the next discovery. */
	return esas2r_disc_start_port(a);
}
464
465static bool esas2r_disc_start_request(struct esas2r_adapter *a,
466 struct esas2r_request *rq)
467{
468 unsigned long flags;
469
470 /* Set the timeout to a minimum value. */
471 if (rq->timeout < ESAS2R_DEFAULT_TMO)
472 rq->timeout = ESAS2R_DEFAULT_TMO;
473
474 /*
475 * Override the request type to distinguish discovery requests. If we
476 * end up deferring the request, esas2r_disc_local_start_request()
477 * will be called to restart it.
478 */
479 rq->req_type = RT_DISC_REQ;
480
481 spin_lock_irqsave(&a->queue_lock, flags);
482
483 if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING)))
484 esas2r_disc_local_start_request(a, rq);
485 else
486 list_add_tail(&rq->req_list, &a->defer_list);
487
488 spin_unlock_irqrestore(&a->queue_lock, flags);
489
490 return true;
491}
492
493void esas2r_disc_local_start_request(struct esas2r_adapter *a,
494 struct esas2r_request *rq)
495{
496 esas2r_trace_enter();
497
498 list_add_tail(&rq->req_list, &a->active_list);
499
500 esas2r_start_vda_request(a, rq);
501
502 esas2r_trace_exit();
503
504 return;
505}
506
507static void esas2r_disc_abort(struct esas2r_adapter *a,
508 struct esas2r_request *rq)
509{
510 struct esas2r_disc_context *dc =
511 (struct esas2r_disc_context *)rq->interrupt_cx;
512
513 esas2r_trace_enter();
514
515 /* abort the current discovery */
516
517 dc->state = DCS_DISC_DONE;
518
519 esas2r_trace_exit();
520}
521
522static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
523 struct esas2r_request *rq)
524{
525 struct esas2r_disc_context *dc =
526 (struct esas2r_disc_context *)rq->interrupt_cx;
527 bool rslt;
528
529 esas2r_trace_enter();
530
531 esas2r_rq_init_request(rq, a);
532
533 esas2r_build_mgt_req(a,
534 rq,
535 VDAMGT_DEV_SCAN,
536 0,
537 0,
538 0,
539 NULL);
540
541 rq->comp_cb = esas2r_disc_block_dev_scan_cb;
542
543 rq->timeout = 30000;
544 rq->interrupt_cx = dc;
545
546 rslt = esas2r_disc_start_request(a, rq);
547
548 esas2r_trace_exit();
549
550 return rslt;
551}
552
553static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
554 struct esas2r_request *rq)
555{
556 struct esas2r_disc_context *dc =
557 (struct esas2r_disc_context *)rq->interrupt_cx;
558 unsigned long flags;
559
560 esas2r_trace_enter();
561
562 spin_lock_irqsave(&a->mem_lock, flags);
563
564 if (rq->req_stat == RS_SUCCESS)
565 dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
566
567 dc->state = DCS_RAID_GRP_INFO;
568 dc->raid_grp_ix = 0;
569
570 esas2r_rq_destroy_request(rq, a);
571
572 /* continue discovery if it's interrupt driven */
573
574 if (!(dc->flags & DCF_POLLED))
575 esas2r_disc_continue(a, rq);
576
577 spin_unlock_irqrestore(&a->mem_lock, flags);
578
579 esas2r_trace_exit();
580}
581
/*
 * Request info for the RAID group at dc->raid_grp_ix.  When the index
 * exceeds VDA_MAX_RAID_GROUPS, enumeration is finished and the state
 * machine is sent to DCS_DISC_DONE without issuing a request.
 */
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
				      struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vda_grp_info *grpinfo;

	esas2r_trace_enter();

	esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);

	if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
		dc->state = DCS_DISC_DONE;

		esas2r_trace_exit();

		return false;
	}

	esas2r_rq_init_request(rq, a);

	/* the response buffer doubles as the request's input area */
	grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

	memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_GRP_INFO,
			     dc->scan_gen,
			     0,
			     sizeof(struct atto_vda_grp_info),
			     NULL);

	/* tell the firmware which group we want */
	grpinfo->grp_index = dc->raid_grp_ix;

	rq->comp_cb = esas2r_disc_raid_grp_info_cb;

	/* reattach the discovery context after the request re-init */
	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}
628
/*
 * Completion of a RAID group info request.  A stale scan generation
 * restarts group enumeration; an online/degraded group proceeds to
 * partition enumeration; other statuses skip to the next group; any
 * failure (other than "no such group") ends group enumeration and
 * moves on to pass-through device discovery.
 */
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
					 struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_grp_info *grpinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		/* firmware rescanned underneath us; start over */
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		goto done;
	}

	if (rq->req_stat == RS_SUCCESS) {
		grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

		if (grpinfo->status != VDA_GRP_STAT_ONLINE
		    && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
			/* go to the next group. */

			dc->raid_grp_ix++;
		} else {
			/* remember the group identity for partition lookups */
			memcpy(&dc->raid_grp_name[0],
			       &grpinfo->grp_name[0],
			       sizeof(grpinfo->grp_name));

			dc->interleave = le32_to_cpu(grpinfo->interleave);
			dc->block_size = le32_to_cpu(grpinfo->block_size);

			dc->state = DCS_PART_INFO;
			dc->part_num = 0;
		}
	} else {
		if (!(rq->req_stat == RS_GRP_INVALID)) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group info failed - "
				   "returned with %x",
				   rq->req_stat);
		}

		/* group enumeration over: start pass-through discovery */
		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	}

done:

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
691
/*
 * Request info for partition dc->part_num of the current RAID group.
 * When the partition index runs past VDA_MAX_PARTITIONS, return to
 * group enumeration at the next group.
 */
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vdapart_info *partinfo;

	esas2r_trace_enter();

	esas2r_trace("part_num: %d", dc->part_num);

	if (dc->part_num >= VDA_MAX_PARTITIONS) {
		dc->state = DCS_RAID_GRP_INFO;
		dc->raid_grp_ix++;

		esas2r_trace_exit();

		return false;
	}

	esas2r_rq_init_request(rq, a);

	/* the response buffer doubles as the request's input area */
	partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

	memset(partinfo, 0, sizeof(struct atto_vdapart_info));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_PART_INFO,
			     dc->scan_gen,
			     0,
			     sizeof(struct atto_vdapart_info),
			     NULL);

	/* identify the partition by number and owning group name */
	partinfo->part_no = dc->part_num;

	memcpy(&partinfo->grp_name[0],
	       &dc->raid_grp_name[0],
	       sizeof(partinfo->grp_name));

	rq->comp_cb = esas2r_disc_part_info_cb;

	/* reattach the discovery context after the request re-init */
	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}
743
/*
 * Completion of a partition info request.  A stale scan generation
 * restarts group enumeration; success adds the RAID target to the
 * target database and advances to the next partition; any failure
 * (other than "last partition") returns to the next RAID group.
 */
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
				     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vdapart_info *partinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		/* firmware rescanned underneath us; start over */
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		dc->state = DCS_RAID_GRP_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

		/* firmware may have skipped ahead; resync our index */
		dc->part_num = partinfo->part_no;

		dc->curr_virt_id = le16_to_cpu(partinfo->target_id);

		esas2r_targ_db_add_raid(a, dc);

		dc->part_num++;
	} else {
		if (!(rq->req_stat == RS_PART_LAST)) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group partition info "
				   "failed - status:%d", rq->req_stat);
		}

		dc->state = DCS_RAID_GRP_INFO;
		dc->raid_grp_ix++;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
792
/*
 * Request info for the pass-through device at dc->dev_ix via a
 * VDAMGT_DEV_PT_INFO management request; the callback advances the
 * state machine.
 */
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	esas2r_trace("dev_ix: %d", dc->dev_ix);

	esas2r_rq_init_request(rq, a);

	/* the response buffer doubles as the request's input area */
	devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

	memset(devinfo, 0, sizeof(struct atto_vda_devinfo));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_DEV_PT_INFO,
			     dc->scan_gen,
			     dc->dev_ix,
			     sizeof(struct atto_vda_devinfo),
			     NULL);

	rq->comp_cb = esas2r_disc_passthru_dev_info_cb;

	/* reattach the discovery context after the request re-init */
	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}
829
/*
 * Completion of a pass-through device info request.  A stale scan
 * generation restarts device enumeration; on success, devices that
 * expose a physical target ID move on to address resolution
 * (DCS_PT_DEV_ADDR) while others are skipped; any failure (other than
 * "no such device") ends discovery.
 */
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		/* firmware rescanned underneath us; start over */
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

		/* firmware may have skipped ahead; resync our index */
		dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);

		dc->curr_virt_id = le16_to_cpu(devinfo->target_id);

		if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
			dc->curr_phys_id =
				le16_to_cpu(devinfo->phys_target_id);
			dc->dev_addr_type = ATTO_GDA_AT_PORT;
			dc->state = DCS_PT_DEV_ADDR;

			esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
			esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		} else {
			/* no physical ID: nothing to resolve, skip it */
			dc->dev_ix++;
		}
	} else {
		if (!(rq->req_stat == RS_DEV_INVALID)) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for device information failed - "
				   "status:%d", rq->req_stat);
		}

		dc->state = DCS_DISC_DONE;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
885
/*
 * Issue a tunneled ATTO_FUNC_GET_DEV_ADDR IOCTL to fetch the address
 * (port/SAS or unique, per dc->dev_addr_type) of the current
 * pass-through device.  The IOCTL payload is staged in a->disc_buffer.
 */
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_ioctl *hi;
	struct esas2r_sg_context sgc;

	esas2r_trace_enter();

	esas2r_rq_init_request(rq, a);

	/* format the request. */

	sgc.cur_offset = NULL;
	sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
	sgc.length = offsetof(struct atto_ioctl, data)
		     + sizeof(struct atto_hba_get_device_address);

	esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, &sgc)) {
		/* no SGL entries available: clean up and report failure */
		esas2r_rq_destroy_request(rq, a);

		esas2r_trace_exit();

		return false;
	}

	rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;

	/* reattach the discovery context after the request re-init */
	rq->interrupt_cx = dc;

	/* format the IOCTL data. */

	hi = (struct atto_ioctl *)a->disc_buffer;

	memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);

	hi->version = ATTO_VER_GET_DEV_ADDR0;
	hi->function = ATTO_FUNC_GET_DEV_ADDR;
	hi->flags = HBAF_TUNNEL;

	/* NOTE(review): le32_to_cpu on a CPU-order value -- numerically
	 * identical to cpu_to_le32 (both byte-swap on BE) but the sparse
	 * annotation direction looks inverted; worth confirming. */
	hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id);
	hi->data.get_dev_addr.addr_type = dc->dev_addr_type;

	/* start it up. */

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}
943
/*
 * Completion callback for the GET_DEV_ADDR IOCTL issued by
 * esas2r_disc_passthru_dev_addr().  On the first (port-address) pass it
 * records the SAS address and re-queries the same device for its unique
 * ID; on the second pass it adds the pass-through target to the target
 * DB.  On any failure, or after the second pass, discovery advances to
 * the next device.  Runs with a->mem_lock held for the duration.
 */
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = NULL;
	unsigned long flags;
	struct atto_ioctl *hi;
	u16 addrlen;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	/* the IOCTL response lives in the shared discovery buffer. */
	hi = (struct atto_ioctl *)a->disc_buffer;

	if (rq->req_stat == RS_SUCCESS
	    && hi->status == ATTO_STS_SUCCESS) {
		addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);

		if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
			/* first pass: save the port SAS address (expect 8
			 * bytes; otherwise clear it). */
			if (addrlen == sizeof(u64))
				memcpy(&dc->sas_addr,
				       &hi->data.get_dev_addr.address[0],
				       addrlen);
			else
				memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));

			/* Get the unique identifier. */
			dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;

			/*
			 * NOTE: this goto deliberately skips the
			 * state-advance logic below so the SAME device is
			 * queried again, now for its unique ID.
			 */
			goto next_dev_addr;
		} else {
			/* Add the pass through target. */
			if (HIBYTE(addrlen) == 0) {
				t = esas2r_targ_db_add_pthru(a,
							     dc,
							     &hi->data.
							     get_dev_addr.
							     address[0],
							     (u8)hi->data.
							     get_dev_addr.
							     addr_len);

				if (t)
					memcpy(&t->sas_addr, &dc->sas_addr,
					       sizeof(t->sas_addr));
			} else {
				/* getting the back end data failed */

				esas2r_log(ESAS2R_LOG_WARN,
					   "an error occurred retrieving the "
					   "back end data (%s:%d)",
					   __func__,
					   __LINE__);
			}
		}
	} else {
		/* getting the back end data failed */

		esas2r_log(ESAS2R_LOG_WARN,
			   "an error occurred retrieving the back end data - "
			   "rq->req_stat:%d hi->status:%d",
			   rq->req_stat, hi->status);
	}

	/* proceed to the next device. */

	if (dc->flags & DCF_DEV_SCAN) {
		/* full scan: back to querying device info by index. */
		dc->dev_ix++;
		dc->state = DCS_PT_DEV_INFO;
	} else if (dc->flags & DCF_DEV_CHANGE) {
		/* change processing: continue adding targets. */
		dc->curr_targ++;
		dc->state = DCS_DEV_ADD;
	} else {
		/* neither mode flag set should be impossible here. */
		esas2r_bugon();
	}

next_dev_addr:
	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
1034
1035static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
1036{
1037 struct esas2r_adapter *a = sgc->adapter;
1038
1039 if (sgc->length > ESAS2R_DISC_BUF_LEN)
1040 esas2r_bugon();
1041
1042 *addr = a->uncached_phys
1043 + (u64)((u8 *)a->disc_buffer - a->uncached);
1044
1045 return sgc->length;
1046}
1047
1048static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
1049 struct esas2r_request *rq)
1050{
1051 struct esas2r_disc_context *dc =
1052 (struct esas2r_disc_context *)rq->interrupt_cx;
1053 struct esas2r_target *t;
1054 struct esas2r_target *t2;
1055
1056 esas2r_trace_enter();
1057
1058 /* process removals. */
1059
1060 for (t = a->targetdb; t < a->targetdb_end; t++) {
1061 if (t->new_target_state != TS_NOT_PRESENT)
1062 continue;
1063
1064 t->new_target_state = TS_INVALID;
1065
1066 /* remove the right target! */
1067
1068 t2 =
1069 esas2r_targ_db_find_by_virt_id(a,
1070 esas2r_targ_get_id(t,
1071 a));
1072
1073 if (t2)
1074 esas2r_targ_db_remove(a, t2);
1075 }
1076
1077 /* removals complete. process arrivals. */
1078
1079 dc->state = DCS_DEV_ADD;
1080 dc->curr_targ = a->targetdb;
1081
1082 esas2r_trace_exit();
1083
1084 return false;
1085}
1086
/*
 * Discovery step: process the current target (dc->curr_targ) if it was
 * flagged TS_PRESENT by an LU event.  RAID volumes are added directly;
 * pass-through devices transition discovery to DCS_PT_DEV_ADDR so their
 * addresses can be fetched first.  Returns false (any request needed is
 * issued by the state that follows).
 */
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = dc->curr_targ;

	if (t >= a->targetdb_end) {
		/* done processing state changes. */

		dc->state = DCS_DISC_DONE;
	} else if (t->new_target_state == TS_PRESENT) {
		struct atto_vda_ae_lu *luevt = &t->lu_event;

		esas2r_trace_enter();

		/* clear this now in case more events come in. */

		t->new_target_state = TS_INVALID;

		/* setup the discovery context for adding this device. */

		dc->curr_virt_id = esas2r_targ_get_id(t, a);

		/*
		 * Block size / interleave are only present when the event
		 * carries the tgt_lun_raid payload and the LU is not a
		 * pass-through device.
		 */
		if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
		     + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
		    && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
			dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
			dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
		} else {
			dc->block_size = 0;
			dc->interleave = 0;
		}

		/* determine the device type being added. */

		if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
			if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
				/* fetch addresses before adding the target. */
				dc->state = DCS_PT_DEV_ADDR;
				dc->dev_addr_type = ATTO_GDA_AT_PORT;
				dc->curr_phys_id = luevt->wphys_target_id;
			} else {
				/* pass-through without a physical ID cannot
				 * be added - log and fall through (the
				 * DCS_DEV_ADD check below advances). */
				esas2r_log(ESAS2R_LOG_WARN,
					   "luevt->dwevent does not have the "
					   "VDAAE_LU_PHYS_ID bit set (%s:%d)",
					   __func__, __LINE__);
			}
		} else {
			dc->raid_grp_name[0] = 0;

			esas2r_targ_db_add_raid(a, dc);
		}

		esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
		esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		esas2r_trace("dwevent: %d", luevt->dwevent);

		esas2r_trace_exit();
	}

	/* only advance if we stayed in DCS_DEV_ADD (pass-through devices
	 * advance later, from the dev-addr callback). */
	if (dc->state == DCS_DEV_ADD) {
		/* go to the next device. */

		dc->curr_targ++;
	}

	return false;
}
1155
1156/*
1157 * When discovery is done, find all requests on defer queue and
1158 * test if they need to be modified. If a target is no longer present
1159 * then complete the request with RS_SEL. Otherwise, update the
1160 * target_id since after a hibernate it can be a different value.
1161 * VDA does not make passthrough target IDs persistent.
1162 */
1163static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
1164{
1165 unsigned long flags;
1166 struct esas2r_target *t;
1167 struct esas2r_request *rq;
1168 struct list_head *element;
1169
1170 /* update virt_targ_id in any outstanding esas2r_requests */
1171
1172 spin_lock_irqsave(&a->queue_lock, flags);
1173
1174 list_for_each(element, &a->defer_list) {
1175 rq = list_entry(element, struct esas2r_request, req_list);
1176 if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
1177 t = a->targetdb + rq->target_id;
1178
1179 if (t->target_state == TS_PRESENT)
1180 rq->vrq->scsi.target_id = le16_to_cpu(
1181 t->virt_targ_id);
1182 else
1183 rq->req_stat = RS_SEL;
1184 }
1185
1186 }
1187
1188 spin_unlock_irqrestore(&a->queue_lock, flags);
1189}
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
new file mode 100644
index 000000000000..8582929b1fef
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -0,0 +1,1512 @@
1
2/*
3 * linux/drivers/scsi/esas2r/esas2r_flash.c
4 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
5 *
6 * Copyright (c) 2001-2013 ATTO Technology, Inc.
7 * (mailto:linuxdrivers@attotech.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29 *
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 *
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
45#include "esas2r.h"
46
47/* local macro defs */
48#define esas2r_nvramcalc_cksum(n) \
49 (esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \
50 SASNVR_CKSUM_SEED))
51#define esas2r_nvramcalc_xor_cksum(n) \
52 (esas2r_calc_byte_xor_cksum((u8 *)(n), \
53 sizeof(struct esas2r_sas_nvram), 0))
54
55#define ESAS2R_FS_DRVR_VER 2
56
/*
 * Built-in SAS NVRAM contents.  Field order must match
 * struct esas2r_sas_nvram exactly; the checksum field (third entry) is
 * left zero here - presumably it is computed at runtime via
 * esas2r_nvramcalc_cksum(), TODO confirm at the use sites.
 */
static struct esas2r_sas_nvram default_sas_nvram = {
	{ 'E', 'S', 'A', 'S' },             /* signature          */
	SASNVR_VERSION,                     /* version            */
	0,                                  /* checksum           */
	31,                                 /* max_lun_for_target */
	SASNVR_PCILAT_MAX,                  /* pci_latency        */
	SASNVR1_BOOT_DRVR,                  /* options1           */
	SASNVR2_HEARTBEAT   | SASNVR2_SINGLE_BUS /* options2      */
	| SASNVR2_SW_MUX_CTRL,
	SASNVR_COAL_DIS,                    /* int_coalescing     */
	SASNVR_CMDTHR_NONE,                 /* cmd_throttle       */
	3,                                  /* dev_wait_time      */
	1,                                  /* dev_wait_count     */
	0,                                  /* spin_up_delay      */
	0,                                  /* ssp_align_rate     */
	{ 0x50, 0x01, 0x08, 0x60,           /* sas_addr           */
	  0x00, 0x00, 0x00, 0x00 },
	{ SASNVR_SPEED_AUTO },              /* phy_speed          */
	{ SASNVR_MUX_DISABLED },            /* SAS multiplexing   */
	{ 0 },                              /* phy_flags          */
	SASNVR_SORT_SAS_ADDR,               /* sort_type          */
	3,                                  /* dpm_reqcmd_lmt     */
	3,                                  /* dpm_stndby_time    */
	0,                                  /* dpm_active_time    */
	{ 0 },                              /* phy_target_id      */
	SASNVR_VSMH_DISABLED,               /* virt_ses_mode      */
	SASNVR_RWM_DEFAULT,                 /* read_write_mode    */
	0,                                  /* link down timeout  */
	{ 0 }                               /* reserved           */
};
87
/*
 * Maps ESAS2R_FS_CMD_* IOCTL command indices to VDA flash sub-function
 * codes.  Index 0 maps to 0xFF, which esas2r_process_fs_ioctl() treats
 * as "invalid function".
 */
static u8 cmd_to_fls_func[] = {
	0xFF,
	VDA_FLASH_READ,
	VDA_FLASH_BEGINW,
	VDA_FLASH_WRITE,
	VDA_FLASH_COMMIT,
	VDA_FLASH_CANCEL
};
96
/*
 * One-byte XOR checksum of a buffer, seeded with 'seed'.
 *
 * Leading bytes are XORed individually until 'addr' is 32-bit aligned,
 * then whole words are XORed into the 32-bit accumulator, then the tail
 * bytes.  Because the final result folds the accumulator's four bytes
 * together with XOR, the word-at-a-time pass is equivalent to a plain
 * byte-wise XOR and the result is endian-independent.
 */
static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed)
{
	u32 cksum = seed;
	u8 *p = (u8 *)&cksum;

	/* align: XOR single bytes (into the low byte) until addr is on a
	 * 4-byte boundary. */
	while (len) {
		if (((uintptr_t)addr & 3) == 0)
			break;

		cksum = cksum ^ *addr;
		addr++;
		len--;
	}
	/* bulk: XOR aligned 32-bit words. */
	while (len >= sizeof(u32)) {
		cksum = cksum ^ *(u32 *)addr;
		addr += 4;
		len -= 4;
	}
	/* tail: XOR any remaining bytes. */
	while (len--) {
		cksum = cksum ^ *addr;
		addr++;
	}
	/* fold the accumulator down to a single byte. */
	return p[0] ^ p[1] ^ p[2] ^ p[3];
}
121
122static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed)
123{
124 u8 *p = (u8 *)addr;
125 u8 cksum = seed;
126
127 while (len--)
128 cksum = cksum + p[len];
129 return cksum;
130}
131
/*
 * Interrupt callback to process FM API write requests.
 *
 * Sequences a flash write as BEGINW -> WRITE -> COMMIT by rewriting the
 * sub_func of the same request and re-arming it with RS_PENDING.  A
 * BEGINW with no data (cur_offset == NULL) jumps straight to COMMIT via
 * the goto into the WRITE case.  Once COMMIT is issued, completion is
 * handed back to the real FM API callback (fc->interrupt_cb).
 */
static void esas2r_fmapi_callback(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
	struct esas2r_flash_context *fc =
		(struct esas2r_flash_context *)rq->interrupt_cx;

	if (rq->req_stat == RS_SUCCESS) {
		/* Last request was successful.  See what to do now. */
		switch (vrq->sub_func) {
		case VDA_FLASH_BEGINW:
			/* no data to write? skip straight to the commit. */
			if (fc->sgc.cur_offset == NULL)
				goto commit;

			vrq->sub_func = VDA_FLASH_WRITE;
			rq->req_stat = RS_PENDING;
			break;

		case VDA_FLASH_WRITE:
commit:
			vrq->sub_func = VDA_FLASH_COMMIT;
			rq->req_stat = RS_PENDING;
			/* the commit completes to the original callback. */
			rq->interrupt_cb = fc->interrupt_cb;
			break;

		default:
			break;
		}
	}

	if (rq->req_stat != RS_PENDING)
		/*
		 * All done. call the real callback to complete the FM API
		 * request.  We should only get here if a BEGINW or WRITE
		 * operation failed.
		 */
		(*fc->interrupt_cb)(a, rq);
}
171
/*
 * Build a flash request based on the flash context.  The request status
 * is filled in on an error.
 *
 * For BEGINW operations the payload checksum is computed up front and
 * completion is routed through esas2r_fmapi_callback() (which drives
 * the WRITE/COMMIT sequence); all other functions complete directly to
 * fc->interrupt_cb.  On S/G list allocation failure the request is
 * marked RS_BUSY and flsh_addr is NOT advanced.
 */
static void build_flash_msg(struct esas2r_adapter *a,
			    struct esas2r_request *rq)
{
	struct esas2r_flash_context *fc =
		(struct esas2r_flash_context *)rq->interrupt_cx;
	struct esas2r_sg_context *sgc = &fc->sgc;
	u8 cksum = 0;

	/* calculate the checksum */
	if (fc->func == VDA_FLASH_BEGINW) {
		if (sgc->cur_offset)
			cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset,
							   sgc->length,
							   0);
		rq->interrupt_cb = esas2r_fmapi_callback;
	} else {
		rq->interrupt_cb = fc->interrupt_cb;
	}
	esas2r_build_flash_req(a,
			       rq,
			       fc->func,
			       cksum,
			       fc->flsh_addr,
			       sgc->length);

	esas2r_rq_free_sg_lists(rq, a);

	/*
	 * remember the length we asked for. we have to keep track of
	 * the current amount done so we know how much to compare when
	 * doing the verification phase.
	 */
	fc->curr_len = fc->sgc.length;

	if (sgc->cur_offset) {
		/* setup the S/G context to build the S/G table */
		esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]);

		if (!esas2r_build_sg_list(a, rq, sgc)) {
			rq->req_stat = RS_BUSY;
			return;
		}
	} else {
		fc->sgc.length = 0;
	}

	/* update the flsh_addr to the next one to write to */
	fc->flsh_addr += fc->curr_len;
}
225
226/* determine the method to process the flash request */
227static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
228{
229 /*
230 * assume we have more to do. if we return with the status set to
231 * RS_PENDING, FM API tasks will continue.
232 */
233 rq->req_stat = RS_PENDING;
234 if (a->flags & AF_DEGRADED_MODE)
235 /* not suppported for now */;
236 else
237 build_flash_msg(a, rq);
238
239 return rq->req_stat == RS_PENDING;
240}
241
/*
 * Boot image fixer-upper called before downloading the image: patch the
 * BIOS component's PCI boot header with this adapter's device ID, then
 * repair the checksums that the patch invalidated.
 */
static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
	struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS];
	struct esas2r_pc_image *pi;
	struct esas2r_boot_header *bh;

	pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset);
	bh =
		(struct esas2r_boot_header *)((u8 *)pi +
					      le16_to_cpu(pi->header_offset));
	bh->device_id = cpu_to_le16(a->pcid->device);

	/* Recalculate the checksum in the PNP header if there */
	if (pi->pnp_offset) {
		u8 *pnp_header_bytes =
			((u8 *)pi + le16_to_cpu(pi->pnp_offset));

		/* Identifier - dword that starts at byte 10 */
		*((u32 *)&pnp_header_bytes[10]) =
			cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor,
					      a->pcid->subsystem_device));

		/* Checksum - byte 9: subtract the current byte sum of the
		 * 32-byte PnP header so it presumably sums back to zero -
		 * TODO confirm header length/convention. */
		pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes,
							      32, 0);
	}

	/* Recalculate the checksum needed by the PC: subtract the image's
	 * current byte sum so the overall sum returns to zero. */
	pi->checksum = pi->checksum -
		       esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0);
}
274
275static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
276{
277 struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI];
278 u32 len = ch->length;
279 u32 offset = ch->image_offset;
280 struct esas2r_efi_image *ei;
281 struct esas2r_boot_header *bh;
282
283 while (len) {
284 u32 thislen;
285
286 ei = (struct esas2r_efi_image *)((u8 *)fi + offset);
287 bh = (struct esas2r_boot_header *)((u8 *)ei +
288 le16_to_cpu(
289 ei->header_offset));
290 bh->device_id = cpu_to_le16(a->pcid->device);
291 thislen = (u32)le16_to_cpu(bh->image_length) * 512;
292
293 if (thislen > len)
294 break;
295
296 len -= thislen;
297 offset += thislen;
298 }
299}
300
301/* Complete a FM API request with the specified status. */
302static bool complete_fmapi_req(struct esas2r_adapter *a,
303 struct esas2r_request *rq, u8 fi_stat)
304{
305 struct esas2r_flash_context *fc =
306 (struct esas2r_flash_context *)rq->interrupt_cx;
307 struct esas2r_flash_img *fi = fc->fi;
308
309 fi->status = fi_stat;
310 fi->driver_error = rq->req_stat;
311 rq->interrupt_cb = NULL;
312 rq->req_stat = RS_SUCCESS;
313
314 if (fi_stat != FI_STAT_IMG_VER)
315 memset(fc->scratch, 0, FM_BUF_SZ);
316
317 esas2r_enable_heartbeat(a);
318 esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK);
319 return false;
320}
321
/*
 * Process each phase of the flash download process.
 *
 * This is a state machine driven by fc->task: for each boot component
 * (BIOS, MAC, EFI, CFG) it issues a BEGINW/WRITE/COMMIT write, then
 * reads the component back in FM_BUF_SZ chunks and compares it against
 * the source image.  The no_bios/no_mac/no_efi/no_cfg labels sit inside
 * the switch so a zero-length (absent) component falls through to the
 * next one.  The while loop spins until a state produces a non-zero
 * sgc.length to transfer, or the machine finishes/fails.
 */
static void fw_download_proc(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	struct esas2r_flash_context *fc =
		(struct esas2r_flash_context *)rq->interrupt_cx;
	struct esas2r_flash_img *fi = fc->fi;
	struct esas2r_component_header *ch;
	u32 len;
	u8 *p, *q;

	/* If the previous operation failed, just return. */
	if (rq->req_stat != RS_SUCCESS)
		goto error;

	/*
	 * If an upload just completed and the compare length is non-zero,
	 * then we just read back part of the image we just wrote. verify the
	 * section and continue reading until the entire image is verified.
	 */
	if (fc->func == VDA_FLASH_READ
	    && fc->cmp_len) {
		ch = &fi->cmp_hdr[fc->comp_typ];

		p = fc->scratch;
		q = (u8 *)fi /* start of the whole gob */
		    + ch->image_offset /* start of the current image */
		    + ch->length /* end of the current image */
		    - fc->cmp_len; /* where we are now */

		/*
		 * NOTE - curr_len is the exact count of bytes for the read
		 * even when the end is read and its not a full buffer
		 */
		for (len = fc->curr_len; len; len--)
			if (*p++ != *q++)
				goto error;

		fc->cmp_len -= fc->curr_len; /* # left to compare */

		/* Update fc and determine the length for the next upload */
		if (fc->cmp_len > FM_BUF_SZ)
			fc->sgc.length = FM_BUF_SZ;
		else
			fc->sgc.length = fc->cmp_len;

		fc->sgc.cur_offset = fc->sgc_offset +
				     ((u8 *)fc->scratch - (u8 *)fi);
	}

	/*
	 * This code uses a 'while' statement since the next component may
	 * have a length = zero. This can happen since some components are
	 * not required. At the end of this 'while' we set up the length
	 * for the next request and therefore sgc.length can be = 0.
	 */
	while (fc->sgc.length == 0) {
		ch = &fi->cmp_hdr[fc->comp_typ];

		switch (fc->task) {
		case FMTSK_ERASE_BOOT:
			/* the BIOS image is written next */
			ch = &fi->cmp_hdr[CH_IT_BIOS];
			if (ch->length == 0)
				goto no_bios;

			fc->task = FMTSK_WRTBIOS;
			fc->func = VDA_FLASH_BEGINW;
			fc->comp_typ = CH_IT_BIOS;
			fc->flsh_addr = FLS_OFFSET_BOOT;
			fc->sgc.length = ch->length;
			fc->sgc.cur_offset = fc->sgc_offset +
					     ch->image_offset;
			break;

		case FMTSK_WRTBIOS:
			/*
			 * The BIOS image has been written - read it and
			 * verify it
			 */
			fc->task = FMTSK_READBIOS;
			fc->func = VDA_FLASH_READ;
			fc->flsh_addr = FLS_OFFSET_BOOT;
			fc->cmp_len = ch->length;
			fc->sgc.length = FM_BUF_SZ;
			fc->sgc.cur_offset = fc->sgc_offset
					     + ((u8 *)fc->scratch -
						(u8 *)fi);
			break;

		case FMTSK_READBIOS:
no_bios:
			/*
			 * Mark the component header status for the image
			 * completed
			 */
			ch->status = CH_STAT_SUCCESS;

			/* The MAC image is written next */
			ch = &fi->cmp_hdr[CH_IT_MAC];
			if (ch->length == 0)
				goto no_mac;

			fc->task = FMTSK_WRTMAC;
			fc->func = VDA_FLASH_BEGINW;
			fc->comp_typ = CH_IT_MAC;
			/* MAC lives right after the BIOS in flash. */
			fc->flsh_addr = FLS_OFFSET_BOOT
					+ fi->cmp_hdr[CH_IT_BIOS].length;
			fc->sgc.length = ch->length;
			fc->sgc.cur_offset = fc->sgc_offset +
					     ch->image_offset;
			break;

		case FMTSK_WRTMAC:
			/* The MAC image has been written - read and verify */
			fc->task = FMTSK_READMAC;
			fc->func = VDA_FLASH_READ;
			fc->flsh_addr -= ch->length;
			fc->cmp_len = ch->length;
			fc->sgc.length = FM_BUF_SZ;
			fc->sgc.cur_offset = fc->sgc_offset
					     + ((u8 *)fc->scratch -
						(u8 *)fi);
			break;

		case FMTSK_READMAC:
no_mac:
			/*
			 * Mark the component header status for the image
			 * completed
			 */
			ch->status = CH_STAT_SUCCESS;

			/* The EFI image is written next */
			ch = &fi->cmp_hdr[CH_IT_EFI];
			if (ch->length == 0)
				goto no_efi;

			fc->task = FMTSK_WRTEFI;
			fc->func = VDA_FLASH_BEGINW;
			fc->comp_typ = CH_IT_EFI;
			/* EFI follows BIOS + MAC in flash. */
			fc->flsh_addr = FLS_OFFSET_BOOT
					+ fi->cmp_hdr[CH_IT_BIOS].length
					+ fi->cmp_hdr[CH_IT_MAC].length;
			fc->sgc.length = ch->length;
			fc->sgc.cur_offset = fc->sgc_offset +
					     ch->image_offset;
			break;

		case FMTSK_WRTEFI:
			/* The EFI image has been written - read and verify */
			fc->task = FMTSK_READEFI;
			fc->func = VDA_FLASH_READ;
			fc->flsh_addr -= ch->length;
			fc->cmp_len = ch->length;
			fc->sgc.length = FM_BUF_SZ;
			fc->sgc.cur_offset = fc->sgc_offset
					     + ((u8 *)fc->scratch -
						(u8 *)fi);
			break;

		case FMTSK_READEFI:
no_efi:
			/*
			 * Mark the component header status for the image
			 * completed
			 */
			ch->status = CH_STAT_SUCCESS;

			/* The CFG image is written next */
			ch = &fi->cmp_hdr[CH_IT_CFG];

			if (ch->length == 0)
				goto no_cfg;
			fc->task = FMTSK_WRTCFG;
			fc->func = VDA_FLASH_BEGINW;
			fc->comp_typ = CH_IT_CFG;
			/* CFG is placed directly below the copyright area. */
			fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
			fc->sgc.length = ch->length;
			fc->sgc.cur_offset = fc->sgc_offset +
					     ch->image_offset;
			break;

		case FMTSK_WRTCFG:
			/* The CFG image has been written - read and verify */
			fc->task = FMTSK_READCFG;
			fc->func = VDA_FLASH_READ;
			fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
			fc->cmp_len = ch->length;
			fc->sgc.length = FM_BUF_SZ;
			fc->sgc.cur_offset = fc->sgc_offset
					     + ((u8 *)fc->scratch -
						(u8 *)fi);
			break;

		case FMTSK_READCFG:
no_cfg:
			/*
			 * Mark the component header status for the image
			 * completed
			 */
			ch->status = CH_STAT_SUCCESS;

			/*
			 * The download is complete. If in degraded mode,
			 * attempt a chip reset.
			 */
			if (a->flags & AF_DEGRADED_MODE)
				esas2r_local_reset_adapter(a);

			a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
			esas2r_print_flash_rev(a);

			/* Update the type of boot image on the card */
			memcpy(a->image_type, fi->rel_version,
			       sizeof(fi->rel_version));
			complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
			return;
		}

		/* If verifying, don't try reading more than what's there */
		if (fc->func == VDA_FLASH_READ
		    && fc->sgc.length > fc->cmp_len)
			fc->sgc.length = fc->cmp_len;
	}

	/* Build the request to perform the next action */
	if (!load_image(a, rq)) {
error:
		/* flag the failing component (if one is current). */
		if (fc->comp_typ < fi->num_comps) {
			ch = &fi->cmp_hdr[fc->comp_typ];
			ch->status = CH_STAT_FAILED;
		}

		complete_fmapi_req(a, rq, FI_STAT_FAILED);
	}
}
559
560/* Determine the flash image adaptyp for this adapter */
561static u8 get_fi_adap_type(struct esas2r_adapter *a)
562{
563 u8 type;
564
565 /* use the device ID to get the correct adap_typ for this HBA */
566 switch (a->pcid->device) {
567 case ATTO_DID_INTEL_IOP348:
568 type = FI_AT_SUN_LAKE;
569 break;
570
571 case ATTO_DID_MV_88RC9580:
572 case ATTO_DID_MV_88RC9580TS:
573 case ATTO_DID_MV_88RC9580TSE:
574 case ATTO_DID_MV_88RC9580TL:
575 type = FI_AT_MV_9580;
576 break;
577
578 default:
579 type = FI_AT_UNKNWN;
580 break;
581 }
582
583 return type;
584}
585
/*
 * Size of config + copyright + flash_ver images, 0 for failure.
 *
 * 'cfg' points one byte PAST the end of the config area; records are
 * walked backwards from there.  Each record's tail layout is
 * [data][size(u16)][type(u16)], so the walk reads type, then size, then
 * skips back over the data.  If 'length' is non-zero the accumulated
 * size must match it exactly.  If 'flash_ver' is non-NULL it receives
 * the FBT_FLASH_VER payload (0 when absent).
 */
static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver)
{
	u16 *pw = (u16 *)cfg - 1;
	u32 sz = 0;
	u32 len = length;

	/* with no expected length, bound the walk by the buffer size. */
	if (len == 0)
		len = FM_BUF_SZ;

	if (flash_ver)
		*flash_ver = 0;

	while (true) {
		u16 type;
		u16 size;

		type = le16_to_cpu(*pw--);
		size = le16_to_cpu(*pw--);

		/* stop at the first unrecognized record type. */
		if (type != FBT_CPYR
		    && type != FBT_SETUP
		    && type != FBT_FLASH_VER)
			break;

		/* the flash version payload is the dword below size. */
		if (type == FBT_FLASH_VER
		    && flash_ver)
			*flash_ver = le32_to_cpu(*(u32 *)(pw - 1));

		sz += size + (2 * sizeof(u16));
		pw -= size / sizeof(u16);

		/* stop before walking off the front of the area. */
		if (sz > len - (2 * sizeof(u16)))
			break;
	}

	/* See if we are comparing the size to the specified length */
	if (length && sz != length)
		return 0;

	return sz;
}
628
/*
 * Verify that the boot image is valid.
 *
 * Checks the 0xAA55 expansion-ROM signature, a sane/aligned PCI boot
 * header offset, the "PCIR" header signature, the mass-storage/RAID
 * class code bytes (00/04/01), and a known code type.  Returns the
 * image's code_type (CODE_TYPE_PC/OPEN/EFI) on success, 0xff on any
 * failure.
 *
 * NOTE(review): `le16_to_cpu(0xaa55)` converts a CPU constant as if it
 * were LE data; semantically this should be cpu_to_le16() to match the
 * LE on-disk field, though for a 16-bit byte swap the two produce the
 * same value on every architecture - confirm against sparse output.
 */
static u8 chk_boot(u8 *boot_img, u32 length)
{
	struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img;
	u16 hdroffset = le16_to_cpu(bi->header_offset);
	struct esas2r_boot_header *bh;

	/* validate the ROM signature and that the boot header lies fully
	 * inside the image at an aligned, plausible offset. */
	if (bi->signature != le16_to_cpu(0xaa55)
	    || (long)hdroffset >
	    (long)(65536L - sizeof(struct esas2r_boot_header))
	    || (hdroffset & 3)
	    || (hdroffset < sizeof(struct esas2r_boot_image))
	    || ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length))
		return 0xff;

	bh = (struct esas2r_boot_header *)((char *)bi + hdroffset);

	/* validate the PCIR structure, class code, and code type. */
	if (bh->signature[0] != 'P'
	    || bh->signature[1] != 'C'
	    || bh->signature[2] != 'I'
	    || bh->signature[3] != 'R'
	    || le16_to_cpu(bh->struct_length) <
	    (u16)sizeof(struct esas2r_boot_header)
	    || bh->class_code[2] != 0x01
	    || bh->class_code[1] != 0x04
	    || bh->class_code[0] != 0x00
	    || (bh->code_type != CODE_TYPE_PC
		&& bh->code_type != CODE_TYPE_OPEN
		&& bh->code_type != CODE_TYPE_EFI))
		return 0xff;

	return bh->code_type;
}
662
663/* The sum of all the WORDS of the image */
664static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
665{
666 struct esas2r_flash_img *fi = fc->fi;
667 u16 cksum;
668 u32 len;
669 u16 *pw;
670
671 for (len = (fi->length - fc->fi_hdr_len) / 2,
672 pw = (u16 *)((u8 *)fi + fc->fi_hdr_len),
673 cksum = 0;
674 len;
675 len--, pw++)
676 cksum = cksum + le16_to_cpu(*pw);
677
678 return cksum;
679}
680
/*
 * Verify the flash image structure. The following verifications will
 * be performed:
 * 1) verify the fi_version is correct
 * 2) verify the checksum of the entire image.
 * 3) validate the adap_typ, action and length fields.
 * 4) validate each component header. check the img_type and
 *    length fields
 * 5) validate each component image. validate signatures and
 *    local checksums
 */
static bool verify_fi(struct esas2r_adapter *a,
		      struct esas2r_flash_context *fc)
{
	struct esas2r_flash_img *fi = fc->fi;
	u8 type;
	bool imgerr;
	u16 i;
	u32 len;
	struct esas2r_component_header *ch;

	/* Verify the length - length must even since we do a word checksum */
	len = fi->length;

	if ((len & 1)
	    || len < fc->fi_hdr_len) {
		fi->status = FI_STAT_LENGTH;
		return false;
	}

	/* Get adapter type and verify type in flash image */
	type = get_fi_adap_type(a);
	if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) {
		fi->status = FI_STAT_ADAPTYP;
		return false;
	}

	/*
	 * Loop through each component and verify the img_type and length
	 * fields. Keep a running count of the sizes so we can verify total
	 * size to additive size.
	 */
	imgerr = false;

	for (i = 0, len = 0, ch = fi->cmp_hdr;
	     i < fi->num_comps;
	     i++, ch++) {
		bool cmperr = false;

		/*
		 * Verify that the component header has the same index as the
		 * image type. The headers must be ordered correctly
		 */
		if (i != ch->img_type) {
			imgerr = true;
			ch->status = CH_STAT_INVALID;
			continue;
		}

		/*
		 * Select the expected boot code type.  Only the boot
		 * components (BIOS/MAC/EFI) use 'type' below, so no
		 * default case is needed here.
		 */
		switch (ch->img_type) {
		case CH_IT_BIOS:
			type = CODE_TYPE_PC;
			break;

		case CH_IT_MAC:
			type = CODE_TYPE_OPEN;
			break;

		case CH_IT_EFI:
			type = CODE_TYPE_EFI;
			break;
		}

		switch (ch->img_type) {
		case CH_IT_FW:
		case CH_IT_NVR:
			break;

		case CH_IT_BIOS:
		case CH_IT_MAC:
		case CH_IT_EFI:
			/* boot images must be a multiple of 512 bytes. */
			if (ch->length & 0x1ff)
				cmperr = true;

			/* Test if component image is present */
			if (ch->length == 0)
				break;

			/* Image is present - verify the image */
			if (chk_boot((u8 *)fi + ch->image_offset, ch->length)
			    != type)
				cmperr = true;

			break;

		case CH_IT_CFG:

			/* Test if component image is present */
			if (ch->length == 0) {
				cmperr = true;
				break;
			}

			/* Image is present - verify the image (chk_cfg walks
			 * backwards, so pass a pointer past the end). */
			if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length,
				     ch->length, NULL))
				cmperr = true;

			break;

		default:

			fi->status = FI_STAT_UNKNOWN;
			return false;
		}

		if (cmperr) {
			imgerr = true;
			ch->status = CH_STAT_INVALID;
		} else {
			ch->status = CH_STAT_PENDING;
			len += ch->length;
		}
	}

	if (imgerr) {
		fi->status = FI_STAT_MISSING;
		return false;
	}

	/* Compare fi->length to the sum of ch->length fields */
	if (len != fi->length - fc->fi_hdr_len) {
		fi->status = FI_STAT_LENGTH;
		return false;
	}

	/* Compute the checksum - it should come out zero */
	if (fi->checksum != calc_fi_checksum(fc)) {
		fi->status = FI_STAT_CHKSUM;
		return false;
	}

	return true;
}
825
826/* Fill in the FS IOCTL response data from a completed request. */
827static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a,
828 struct esas2r_request *rq)
829{
830 struct esas2r_ioctl_fs *fs =
831 (struct esas2r_ioctl_fs *)rq->interrupt_cx;
832
833 if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
834 esas2r_enable_heartbeat(a);
835
836 fs->driver_error = rq->req_stat;
837
838 if (fs->driver_error == RS_SUCCESS)
839 fs->status = ATTO_STS_SUCCESS;
840 else
841 fs->status = ATTO_STS_FAILED;
842}
843
844/* Prepare an FS IOCTL request to be sent to the firmware. */
845bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
846 struct esas2r_ioctl_fs *fs,
847 struct esas2r_request *rq,
848 struct esas2r_sg_context *sgc)
849{
850 u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func);
851 struct esas2r_ioctlfs_command *fsc = &fs->command;
852 u8 func = 0;
853 u32 datalen;
854
855 fs->status = ATTO_STS_FAILED;
856 fs->driver_error = RS_PENDING;
857
858 if (fs->version > ESAS2R_FS_VER) {
859 fs->status = ATTO_STS_INV_VERSION;
860 return false;
861 }
862
863 func = cmd_to_fls_func[fsc->command];
864 if (fsc->command >= cmdcnt || func == 0xFF) {
865 fs->status = ATTO_STS_INV_FUNC;
866 return false;
867 }
868
869 if (fsc->command != ESAS2R_FS_CMD_CANCEL) {
870 if ((a->pcid->device != ATTO_DID_MV_88RC9580
871 || fs->adap_type != ESAS2R_FS_AT_ESASRAID2)
872 && (a->pcid->device != ATTO_DID_MV_88RC9580TS
873 || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2)
874 && (a->pcid->device != ATTO_DID_MV_88RC9580TSE
875 || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E)
876 && (a->pcid->device != ATTO_DID_MV_88RC9580TL
877 || fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) {
878 fs->status = ATTO_STS_INV_ADAPTER;
879 return false;
880 }
881
882 if (fs->driver_ver > ESAS2R_FS_DRVR_VER) {
883 fs->status = ATTO_STS_INV_DRVR_VER;
884 return false;
885 }
886 }
887
888 if (a->flags & AF_DEGRADED_MODE) {
889 fs->status = ATTO_STS_DEGRADED;
890 return false;
891 }
892
893 rq->interrupt_cb = esas2r_complete_fs_ioctl;
894 rq->interrupt_cx = fs;
895 datalen = le32_to_cpu(fsc->length);
896 esas2r_build_flash_req(a,
897 rq,
898 func,
899 fsc->checksum,
900 le32_to_cpu(fsc->flash_addr),
901 datalen);
902
903 if (func == VDA_FLASH_WRITE
904 || func == VDA_FLASH_READ) {
905 if (datalen == 0) {
906 fs->status = ATTO_STS_INV_FUNC;
907 return false;
908 }
909
910 esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
911 sgc->length = datalen;
912
913 if (!esas2r_build_sg_list(a, rq, sgc)) {
914 fs->status = ATTO_STS_OUT_OF_RSRC;
915 return false;
916 }
917 }
918
919 if (func == VDA_FLASH_COMMIT)
920 esas2r_disable_heartbeat(a);
921
922 esas2r_start_request(a, rq);
923
924 return true;
925}
926
/*
 * Acquire (DRBL_FLASH_REQ) or release (DRBL_FLASH_DONE) direct access
 * to the flash part by handshaking with the firmware through the
 * doorbell registers.  Returns false if the firmware does not
 * acknowledge within the timeout.
 */
static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
{
	u32 starttime;
	u32 timeout;
	u32 intstat;
	u32 doorbell;

	/* Disable chip interrupts awhile */
	if (function == DRBL_FLASH_REQ)
		esas2r_disable_chip_interrupts(a);

	/* Issue the request to the firmware */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, function);

	/* Now wait for the firmware to process it */
	starttime = jiffies_to_msecs(jiffies);
	/* allow much longer while a chip reset or discovery is pending */
	timeout = a->flags &
		  (AF_CHPRST_PENDING | AF_DISC_PENDING) ? 40000 : 5000;

	while (true) {
		intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);

		if (intstat & MU_INTSTAT_DRBL) {
			/* Got a doorbell interrupt.  Check for the function */
			doorbell =
				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
			/* write the value back to acknowledge it */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			if (doorbell & function)
				break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
			/*
			 * Timeout.  If we were requesting flash access,
			 * indicate we are done so the firmware knows we gave
			 * up.  If this was a REQ, we also need to re-enable
			 * chip interrupts.
			 */
			if (function == DRBL_FLASH_REQ) {
				esas2r_hdebug("flash access timeout");
				esas2r_write_register_dword(a, MU_DOORBELL_IN,
							    DRBL_FLASH_DONE);
				esas2r_enable_chip_interrupts(a);
			} else {
				esas2r_hdebug("flash release timeout");
			}

			return false;
		}
	}

	/* if we're done, re-enable chip interrupts */
	if (function == DRBL_FLASH_DONE)
		esas2r_enable_chip_interrupts(a);

	return true;
}
987
988#define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE)
989
/*
 * Read 'size' bytes of flash starting at flash offset 'from' into 'to'.
 * The flash is accessed through a sliding memory window of WINDOW_SIZE
 * bytes that is remapped each time the copy crosses a window boundary.
 * Returns false if flash access could not be acquired from firmware.
 */
bool esas2r_read_flash_block(struct esas2r_adapter *a,
			     void *to,
			     u32 from,
			     u32 size)
{
	u8 *end = (u8 *)to;	/* current output position */

	/* Try to acquire access to the flash */
	if (!esas2r_flash_access(a, DRBL_FLASH_REQ))
		return false;

	while (size) {
		u32 len;
		u32 offset;
		u32 iatvr;

		/* map the window containing 'from' (serial or parallel part) */
		if (a->flags2 & AF2_SERIAL_FLASH)
			iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
		else
			iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);

		esas2r_map_data_window(a, iatvr);
		offset = from & (WINDOW_SIZE - 1);
		len = size;

		/* clamp the copy to what remains of the current window */
		if (len > WINDOW_SIZE - offset)
			len = WINDOW_SIZE - offset;

		from += len;
		size -= len;

		/* byte-wise copy out of the mapped window */
		while (len--) {
			*end++ = esas2r_read_data_byte(a, offset);
			offset++;
		}
	}

	/* Release flash access */
	esas2r_flash_access(a, DRBL_FLASH_DONE);
	return true;
}
1031
/*
 * Determine the flash image version by scanning the block table that
 * sits just below the copyright block (FLS_OFFSET_CPYR).  The table is
 * walked backwards from the top of the 256-byte read; each entry is a
 * little-endian (size, type) u16 pair followed below by 'size' bytes
 * of payload.  Sets a->flash_ver when an FBT_FLASH_VER entry is found,
 * then validates/prints it via esas2r_print_flash_rev().
 */
bool esas2r_read_flash_rev(struct esas2r_adapter *a)
{
	u8 bytes[256];
	u16 *pw;	/* walks backwards from the end of 'bytes' */
	u16 *pwstart;	/* lowest address the walk may dereference */
	u16 type;
	u16 size;
	u32 sz;

	sz = sizeof(bytes);
	pw = (u16 *)(bytes + sz);
	/* leave room for at least one (size, type) pair at the bottom */
	pwstart = (u16 *)bytes + 2;

	if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz))
		goto invalid_rev;

	while (pw >= pwstart) {
		pw--;
		type = le16_to_cpu(*pw);
		pw--;
		size = le16_to_cpu(*pw);
		/* skip over this entry's payload */
		pw -= size / 2;

		/*
		 * Skip copyright/setup blocks; if the payload ran below
		 * the buffer, the loop condition terminates the scan.
		 */
		if (type == FBT_CPYR
		    || type == FBT_SETUP
		    || pw < pwstart)
			continue;

		if (type == FBT_FLASH_VER)
			a->flash_ver = le32_to_cpu(*(u32 *)pw);

		break;
	}

invalid_rev:
	/* validate whatever we found (or zero) and report the result */
	return esas2r_print_flash_rev(a);
}
1069
1070bool esas2r_print_flash_rev(struct esas2r_adapter *a)
1071{
1072 u16 year = LOWORD(a->flash_ver);
1073 u8 day = LOBYTE(HIWORD(a->flash_ver));
1074 u8 month = HIBYTE(HIWORD(a->flash_ver));
1075
1076 if (day == 0
1077 || month == 0
1078 || day > 31
1079 || month > 12
1080 || year < 2006
1081 || year > 9999) {
1082 strcpy(a->flash_rev, "not found");
1083 a->flash_ver = 0;
1084 return false;
1085 }
1086
1087 sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year);
1088 esas2r_hdebug("flash version: %s", a->flash_rev);
1089 return true;
1090}
1091
/*
 * Find the type of boot image type that is currently in the flash.
 * The chip only has a 64 KB PCI-e expansion ROM
 * size so only one image can be flashed at a time.
 *
 * Walks the chain of option-ROM images at FLS_OFFSET_BOOT, reading the
 * first 256 bytes of each, and fills a->image_type with a printable
 * description of the first recognized image.  Returns true when a
 * known BIOS/EFI image is found.
 */
bool esas2r_read_image_type(struct esas2r_adapter *a)
{
	u8 bytes[256];
	struct esas2r_boot_image *bi;
	struct esas2r_boot_header *bh;
	u32 sz;
	u32 len;
	u32 offset;

	/* Start at the base of the boot images and look for a valid image */
	sz = sizeof(bytes);
	len = FLS_LENGTH_BOOT;
	offset = 0;

	while (true) {
		if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT +
					     offset,
					     sz))
			goto invalid_rev;

		bi = (struct esas2r_boot_image *)bytes;
		bh = (struct esas2r_boot_header *)((u8 *)bi +
						   le16_to_cpu(
							   bi->header_offset));
		/* every option-ROM image must start with the 0xAA55 magic */
		if (bi->signature != cpu_to_le16(0xAA55))
			goto invalid_rev;

		if (bh->code_type == CODE_TYPE_PC) {
			strcpy(a->image_type, "BIOS");

			return true;
		} else if (bh->code_type == CODE_TYPE_EFI) {
			struct esas2r_efi_image *ei;

			/*
			 * So we have an EFI image.  There are several types
			 * so see which architecture we have.
			 */
			ei = (struct esas2r_efi_image *)bytes;

			switch (le16_to_cpu(ei->machine_type)) {
			case EFI_MACHINE_IA32:
				strcpy(a->image_type, "EFI 32-bit");
				return true;

			case EFI_MACHINE_IA64:
				strcpy(a->image_type, "EFI itanium");
				return true;

			case EFI_MACHINE_X64:
				strcpy(a->image_type, "EFI 64-bit");
				return true;

			case EFI_MACHINE_EBC:
				strcpy(a->image_type, "EFI EBC");
				return true;

			default:
				/* unknown EFI machine type */
				goto invalid_rev;
			}
		} else {
			u32 thislen;

			/* jump to the next image */
			thislen = (u32)le16_to_cpu(bh->image_length) * 512;
			/* stop on a zero-length image, overrun, or last image */
			if (thislen == 0
			    || thislen + offset > len
			    || bh->indicator == INDICATOR_LAST)
				break;

			offset += thislen;
		}
	}

invalid_rev:
	strcpy(a->image_type, "no boot images");
	return false;
}
1175
1176/*
1177 * Read and validate current NVRAM parameters by accessing
1178 * physical NVRAM directly. if currently stored parameters are
1179 * invalid, use the defaults.
1180 */
1181bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
1182{
1183 bool result;
1184
1185 if (down_interruptible(&a->nvram_semaphore))
1186 return false;
1187
1188 if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
1189 sizeof(struct esas2r_sas_nvram))) {
1190 esas2r_hdebug("NVRAM read failed, using defaults");
1191 return false;
1192 }
1193
1194 result = esas2r_nvram_validate(a);
1195
1196 up(&a->nvram_semaphore);
1197
1198 return result;
1199}
1200
/*
 * Interrupt callback to process NVRAM completions.  Advances the flash
 * write sequence BEGINW -> WRITE -> COMMIT by switching the same
 * request to the next sub-function and marking it RS_PENDING; once the
 * sequence finishes (or a step fails), updates AF_NVR_VALID, re-enables
 * the heartbeat and releases the semaphore taken by esas2r_nvram_write.
 */
static void esas2r_nvram_callback(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_vda_flash_req *vrq = &rq->vrq->flash;

	if (rq->req_stat == RS_SUCCESS) {
		/* last request was successful.  see what to do now. */

		switch (vrq->sub_func) {
		case VDA_FLASH_BEGINW:
			/* erase/begin done; move on to the data write */
			vrq->sub_func = VDA_FLASH_WRITE;
			rq->req_stat = RS_PENDING;
			break;

		case VDA_FLASH_WRITE:
			/* data written; commit it */
			vrq->sub_func = VDA_FLASH_COMMIT;
			rq->req_stat = RS_PENDING;
			break;

		case VDA_FLASH_READ:
			/* a fresh read: validate the new cached contents */
			esas2r_nvram_validate(a);
			break;

		case VDA_FLASH_COMMIT:
		default:
			break;
		}
	}

	if (rq->req_stat != RS_PENDING) {
		/* update the NVRAM state */
		if (rq->req_stat == RS_SUCCESS)
			esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
		else
			esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);

		esas2r_enable_heartbeat(a);

		up(&a->nvram_semaphore);
	}
}
1243
/*
 * Write the contents of nvram to the adapter's physical NVRAM.
 * The cached copy of the NVRAM is also updated.
 *
 * 'nvram' may be NULL to re-write the currently cached settings.
 * Returns false if the adapter is degraded, the semaphore could not
 * be taken, or the settings fail validation; on success the write
 * sequence proceeds asynchronously via esas2r_nvram_callback, which
 * also releases the semaphore.
 */
bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
			struct esas2r_sas_nvram *nvram)
{
	struct esas2r_sas_nvram *n = nvram;
	u8 sas_address_bytes[8];
	u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
	struct atto_vda_flash_req *vrq = &rq->vrq->flash;

	if (a->flags & AF_DEGRADED_MODE)
		return false;

	if (down_interruptible(&a->nvram_semaphore))
		return false;

	/* NULL means "re-write the currently cached settings" */
	if (n == NULL)
		n = a->nvram;

	/* check the validity of the settings */
	if (n->version > SASNVR_VERSION) {
		up(&a->nvram_semaphore);
		return false;
	}

	memcpy(&sas_address_bytes[0], n->sas_addr, 8);

	/*
	 * Require the fixed 50:01:08:6x address prefix and a non-zero
	 * remainder.  NOTE(review): prefix semantics inferred from the
	 * byte pattern only — confirm against ATTO's SAS address format.
	 */
	if (sas_address_bytes[0] != 0x50
	    || sas_address_bytes[1] != 0x01
	    || sas_address_bytes[2] != 0x08
	    || (sas_address_bytes[3] & 0xF0) != 0x60
	    || ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) {
		up(&a->nvram_semaphore);
		return false;
	}

	if (n->spin_up_delay > SASNVR_SPINUP_MAX)
		n->spin_up_delay = SASNVR_SPINUP_MAX;

	n->version = SASNVR_VERSION;
	/* adjust the stored checksum so the block checksums correctly */
	n->checksum = n->checksum - esas2r_nvramcalc_cksum(n);
	memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram));

	/* write the NVRAM */
	n = a->nvram;
	/* heartbeat is re-enabled by esas2r_nvram_callback when done */
	esas2r_disable_heartbeat(a);

	esas2r_build_flash_req(a,
			       rq,
			       VDA_FLASH_BEGINW,
			       esas2r_nvramcalc_xor_cksum(n),
			       FLS_OFFSET_NVR,
			       sizeof(struct esas2r_sas_nvram));

	if (a->flags & AF_LEGACY_SGE_MODE) {

		vrq->data.sge[0].length =
			cpu_to_le32(SGE_LAST |
				    sizeof(struct esas2r_sas_nvram));
		vrq->data.sge[0].address = cpu_to_le64(
			a->uncached_phys + (u64)((u8 *)n - a->uncached));
	} else {
		vrq->data.prde[0].ctl_len =
			cpu_to_le32(sizeof(struct esas2r_sas_nvram));
		vrq->data.prde[0].address = cpu_to_le64(
			a->uncached_phys
			+ (u64)((u8 *)n - a->uncached));
	}
	rq->interrupt_cb = esas2r_nvram_callback;
	esas2r_start_request(a, rq);
	return true;
}
1318
1319/* Validate the cached NVRAM. if the NVRAM is invalid, load the defaults. */
1320bool esas2r_nvram_validate(struct esas2r_adapter *a)
1321{
1322 struct esas2r_sas_nvram *n = a->nvram;
1323 bool rslt = false;
1324
1325 if (n->signature[0] != 'E'
1326 || n->signature[1] != 'S'
1327 || n->signature[2] != 'A'
1328 || n->signature[3] != 'S') {
1329 esas2r_hdebug("invalid NVRAM signature");
1330 } else if (esas2r_nvramcalc_cksum(n)) {
1331 esas2r_hdebug("invalid NVRAM checksum");
1332 } else if (n->version > SASNVR_VERSION) {
1333 esas2r_hdebug("invalid NVRAM version");
1334 } else {
1335 esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
1336 rslt = true;
1337 }
1338
1339 if (rslt == false) {
1340 esas2r_hdebug("using defaults");
1341 esas2r_nvram_set_defaults(a);
1342 }
1343
1344 return rslt;
1345}
1346
1347/*
1348 * Set the cached NVRAM to defaults. note that this function sets the default
1349 * NVRAM when it has been determined that the physical NVRAM is invalid.
1350 * In this case, the SAS address is fabricated.
1351 */
1352void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
1353{
1354 struct esas2r_sas_nvram *n = a->nvram;
1355 u32 time = jiffies_to_msecs(jiffies);
1356
1357 esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
1358 memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
1359 n->sas_addr[3] |= 0x0F;
1360 n->sas_addr[4] = HIBYTE(LOWORD(time));
1361 n->sas_addr[5] = LOBYTE(LOWORD(time));
1362 n->sas_addr[6] = a->pcid->bus->number;
1363 n->sas_addr[7] = a->pcid->devfn;
1364}
1365
1366void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
1367 struct esas2r_sas_nvram *nvram)
1368{
1369 u8 sas_addr[8];
1370
1371 /*
1372 * in case we are copying the defaults into the adapter, copy the SAS
1373 * address out first.
1374 */
1375 memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
1376 memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
1377 memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
1378}
1379
/*
 * Top-level firmware-management (FM API) entry point: either starts a
 * flash download sequence (FI_ACT_DOWN) or reports component upload
 * sizes (FI_ACT_UPSZ).  Takes AF_FLASH_LOCK for the duration of the
 * operation; every early-out goes through complete_fmapi_req(), which
 * presumably releases the lock — confirm in its definition.
 */
bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
		   struct esas2r_request *rq, struct esas2r_sg_context *sgc)
{
	struct esas2r_flash_context *fc = &a->flash_context;
	u8 j;
	struct esas2r_component_header *ch;

	/* only one FM API operation may be in flight at a time */
	if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) {
		/* flag was already set */
		fi->status = FI_STAT_BUSY;
		return false;
	}

	/* work on the adapter-owned copy of the SG context from here on */
	memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context));
	sgc = &fc->sgc;
	fc->fi = fi;
	fc->sgc_offset = sgc->cur_offset;
	rq->req_stat = RS_SUCCESS;
	rq->interrupt_cx = fc;

	switch (fi->fi_version) {
	case FI_VERSION_1:
		fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf;
		fc->num_comps = FI_NUM_COMPS_V1;
		fc->fi_hdr_len = sizeof(struct esas2r_flash_img);
		break;

	default:
		return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
	}

	if (a->flags & AF_DEGRADED_MODE)
		return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);

	switch (fi->action) {
	case FI_ACT_DOWN: /* Download the components */
		/* Verify the format of the flash image */
		if (!verify_fi(a, fc))
			return complete_fmapi_req(a, rq, fi->status);

		/* Adjust the BIOS fields that are dependent on the HBA */
		ch = &fi->cmp_hdr[CH_IT_BIOS];

		if (ch->length)
			fix_bios(a, fi);

		/* Adjust the EFI fields that are dependent on the HBA */
		ch = &fi->cmp_hdr[CH_IT_EFI];

		if (ch->length)
			fix_efi(a, fi);

		/*
		 * Since the image was just modified, compute the checksum on
		 * the modified image.  First update the CRC for the composite
		 * expansion ROM image.
		 */
		fi->checksum = calc_fi_checksum(fc);

		/* Disable the heartbeat */
		esas2r_disable_heartbeat(a);

		/* Now start up the download sequence */
		fc->task = FMTSK_ERASE_BOOT;
		fc->func = VDA_FLASH_BEGINW;
		fc->comp_typ = CH_IT_CFG;
		fc->flsh_addr = FLS_OFFSET_BOOT;
		fc->sgc.length = FLS_LENGTH_BOOT;
		fc->sgc.cur_offset = NULL;

		/* Setup the callback address */
		fc->interrupt_cb = fw_download_proc;
		break;

	case FI_ACT_UPSZ: /* Get upload sizes */
		fi->adap_typ = get_fi_adap_type(a);
		fi->flags = 0;
		fi->num_comps = fc->num_comps;
		fi->length = fc->fi_hdr_len;

		/* Report the type of boot image in the rel_version string */
		memcpy(fi->rel_version, a->image_type,
		       sizeof(fi->rel_version));

		/* Build the component headers */
		for (j = 0, ch = fi->cmp_hdr;
		     j < fi->num_comps;
		     j++, ch++) {
			ch->img_type = j;
			ch->status = CH_STAT_PENDING;
			ch->length = 0;
			ch->version = 0xffffffff;
			ch->image_offset = 0;
			ch->pad[0] = 0;
			ch->pad[1] = 0;
		}

		/* a known flash version marks all components as flashed */
		if (a->flash_ver != 0) {
			fi->cmp_hdr[CH_IT_BIOS].version =
				fi->cmp_hdr[CH_IT_MAC].version =
					fi->cmp_hdr[CH_IT_EFI].version =
						fi->cmp_hdr[CH_IT_CFG].version
							= a->flash_ver;

			fi->cmp_hdr[CH_IT_BIOS].status =
				fi->cmp_hdr[CH_IT_MAC].status =
					fi->cmp_hdr[CH_IT_EFI].status =
						fi->cmp_hdr[CH_IT_CFG].status =
							CH_STAT_SUCCESS;

			return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
		}

		/* fall through */

	case FI_ACT_UP: /* Upload the components */
	default:
		return complete_fmapi_req(a, rq, FI_STAT_INVALID);
	}

	/*
	 * If we make it here, fc has been setup to do the first task.  Call
	 * load_image to format the request, start it, and get out.  The
	 * interrupt code will call the callback when the first message is
	 * complete.
	 */
	if (!load_image(a, rq))
		return complete_fmapi_req(a, rq, FI_STAT_FAILED);

	esas2r_start_request(a, rq);

	return true;
}
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
new file mode 100644
index 000000000000..3a798e7d5c56
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -0,0 +1,1773 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_init.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
/*
 * Allocate mem_desc->size bytes of zeroed, coherent DMA memory with
 * the requested alignment.  Over-allocates by 'align' bytes and keeps
 * the raw pointer and size in esas2r_data/esas2r_param so that
 * esas2r_initmem_free() can undo the alignment adjustment.
 */
static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
				 struct esas2r_mem_desc *mem_desc,
				 u32 align)
{
	mem_desc->esas2r_param = mem_desc->size + align;
	mem_desc->virt_addr = NULL;
	mem_desc->phys_addr = 0;
	mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
						   (size_t)mem_desc->
						   esas2r_param,
						   (dma_addr_t *)&mem_desc->
						   phys_addr,
						   GFP_KERNEL);

	if (mem_desc->esas2r_data == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "failed to allocate %lu bytes of consistent memory!",
			   (long
			    unsigned
			    int)mem_desc->esas2r_param);
		return false;
	}

	/* round both the virtual and DMA addresses up to 'align' */
	mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
	mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
	memset(mem_desc->virt_addr, 0, mem_desc->size);
	return true;
}
74
/*
 * Free a descriptor allocated by esas2r_initmem_alloc().  A no-op if
 * it was never successfully allocated (virt_addr is NULL).
 */
static void esas2r_initmem_free(struct esas2r_adapter *a,
				struct esas2r_mem_desc *mem_desc)
{
	if (mem_desc->virt_addr == NULL)
		return;

	/*
	 * Careful!  phys_addr and virt_addr may have been adjusted from the
	 * original allocation in order to return the desired alignment.  That
	 * means we have to use the original address (in esas2r_data) and size
	 * (esas2r_param) and calculate the original physical address based on
	 * the difference between the requested and actual allocation size.
	 */
	if (mem_desc->phys_addr) {
		/* bytes the virtual address was advanced by PTR_ALIGN */
		int unalign = ((u8 *)mem_desc->virt_addr) -
			      ((u8 *)mem_desc->esas2r_data);

		dma_free_coherent(&a->pcid->dev,
				  (size_t)mem_desc->esas2r_param,
				  mem_desc->esas2r_data,
				  (dma_addr_t)(mem_desc->phys_addr - unalign));
	} else {
		/* no DMA address recorded: assume a plain kernel allocation */
		kfree(mem_desc->esas2r_data);
	}

	mem_desc->virt_addr = NULL;
}
102
103static bool alloc_vda_req(struct esas2r_adapter *a,
104 struct esas2r_request *rq)
105{
106 struct esas2r_mem_desc *memdesc = kzalloc(
107 sizeof(struct esas2r_mem_desc), GFP_KERNEL);
108
109 if (memdesc == NULL) {
110 esas2r_hdebug("could not alloc mem for vda request memdesc\n");
111 return false;
112 }
113
114 memdesc->size = sizeof(union atto_vda_req) +
115 ESAS2R_DATA_BUF_LEN;
116
117 if (!esas2r_initmem_alloc(a, memdesc, 256)) {
118 esas2r_hdebug("could not alloc mem for vda request\n");
119 kfree(memdesc);
120 return false;
121 }
122
123 a->num_vrqs++;
124 list_add(&memdesc->next_desc, &a->vrq_mds_head);
125
126 rq->vrq_md = memdesc;
127 rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
128 rq->vrq->scsi.handle = a->num_vrqs;
129
130 return true;
131}
132
133static void esas2r_unmap_regions(struct esas2r_adapter *a)
134{
135 if (a->regs)
136 iounmap((void __iomem *)a->regs);
137
138 a->regs = NULL;
139
140 pci_release_region(a->pcid, 2);
141
142 if (a->data_window)
143 iounmap((void __iomem *)a->data_window);
144
145 a->data_window = NULL;
146
147 pci_release_region(a->pcid, 0);
148}
149
150static int esas2r_map_regions(struct esas2r_adapter *a)
151{
152 int error;
153
154 a->regs = NULL;
155 a->data_window = NULL;
156
157 error = pci_request_region(a->pcid, 2, a->name);
158 if (error != 0) {
159 esas2r_log(ESAS2R_LOG_CRIT,
160 "pci_request_region(2) failed, error %d",
161 error);
162
163 return error;
164 }
165
166 a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
167 pci_resource_len(a->pcid, 2));
168 if (a->regs == NULL) {
169 esas2r_log(ESAS2R_LOG_CRIT,
170 "ioremap failed for regs mem region\n");
171 pci_release_region(a->pcid, 2);
172 return -EFAULT;
173 }
174
175 error = pci_request_region(a->pcid, 0, a->name);
176 if (error != 0) {
177 esas2r_log(ESAS2R_LOG_CRIT,
178 "pci_request_region(2) failed, error %d",
179 error);
180 esas2r_unmap_regions(a);
181 return error;
182 }
183
184 a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
185 0),
186 pci_resource_len(a->pcid, 0));
187 if (a->data_window == NULL) {
188 esas2r_log(ESAS2R_LOG_CRIT,
189 "ioremap failed for data_window mem region\n");
190 esas2r_unmap_regions(a);
191 return -EFAULT;
192 }
193
194 return 0;
195}
196
197static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
198{
199 int i;
200
201 /* Set up interrupt mode based on the requested value */
202 switch (intr_mode) {
203 case INTR_MODE_LEGACY:
204use_legacy_interrupts:
205 a->intr_mode = INTR_MODE_LEGACY;
206 break;
207
208 case INTR_MODE_MSI:
209 i = pci_enable_msi(a->pcid);
210 if (i != 0) {
211 esas2r_log(ESAS2R_LOG_WARN,
212 "failed to enable MSI for adapter %d, "
213 "falling back to legacy interrupts "
214 "(err=%d)", a->index,
215 i);
216 goto use_legacy_interrupts;
217 }
218 a->intr_mode = INTR_MODE_MSI;
219 esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
220 break;
221
222
223 default:
224 esas2r_log(ESAS2R_LOG_WARN,
225 "unknown interrupt_mode %d requested, "
226 "falling back to legacy interrupt",
227 interrupt_mode);
228 goto use_legacy_interrupts;
229 }
230}
231
/*
 * Request the adapter's IRQ line.  Legacy INTx handlers are registered
 * shared; MSI uses its own handler.  On success AF2_IRQ_CLAIMED is set;
 * on failure the error is only logged and the adapter is left without
 * a claimed IRQ.
 */
static void esas2r_claim_interrupts(struct esas2r_adapter *a)
{
	unsigned long flags = IRQF_DISABLED;

	if (a->intr_mode == INTR_MODE_LEGACY)
		flags |= IRQF_SHARED;

	esas2r_log(ESAS2R_LOG_INFO,
		   "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
		   a->pcid->irq, a, a->name, flags);

	/* pick the handler that matches the configured interrupt mode */
	if (request_irq(a->pcid->irq,
			(a->intr_mode ==
			 INTR_MODE_LEGACY) ? esas2r_interrupt :
			esas2r_msi_interrupt,
			flags,
			a->name,
			a)) {
		esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
			   a->pcid->irq);
		return;
	}

	esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED);
	esas2r_log(ESAS2R_LOG_INFO,
		   "claimed IRQ %d flags: 0x%lx",
		   a->pcid->irq, flags);
}
260
261int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
262 int index)
263{
264 struct esas2r_adapter *a;
265 u64 bus_addr = 0;
266 int i;
267 void *next_uncached;
268 struct esas2r_request *first_request, *last_request;
269
270 if (index >= MAX_ADAPTERS) {
271 esas2r_log(ESAS2R_LOG_CRIT,
272 "tried to init invalid adapter index %u!",
273 index);
274 return 0;
275 }
276
277 if (esas2r_adapters[index]) {
278 esas2r_log(ESAS2R_LOG_CRIT,
279 "tried to init existing adapter index %u!",
280 index);
281 return 0;
282 }
283
284 a = (struct esas2r_adapter *)host->hostdata;
285 memset(a, 0, sizeof(struct esas2r_adapter));
286 a->pcid = pcid;
287 a->host = host;
288
289 if (sizeof(dma_addr_t) > 4) {
290 const uint64_t required_mask = dma_get_required_mask
291 (&pcid->dev);
292 if (required_mask > DMA_BIT_MASK(32)
293 && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
294 && !pci_set_consistent_dma_mask(pcid,
295 DMA_BIT_MASK(64))) {
296 esas2r_log_dev(ESAS2R_LOG_INFO,
297 &(a->pcid->dev),
298 "64-bit PCI addressing enabled\n");
299 } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
300 && !pci_set_consistent_dma_mask(pcid,
301 DMA_BIT_MASK(32))) {
302 esas2r_log_dev(ESAS2R_LOG_INFO,
303 &(a->pcid->dev),
304 "32-bit PCI addressing enabled\n");
305 } else {
306 esas2r_log(ESAS2R_LOG_CRIT,
307 "failed to set DMA mask");
308 esas2r_kill_adapter(index);
309 return 0;
310 }
311 } else {
312 if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
313 && !pci_set_consistent_dma_mask(pcid,
314 DMA_BIT_MASK(32))) {
315 esas2r_log_dev(ESAS2R_LOG_INFO,
316 &(a->pcid->dev),
317 "32-bit PCI addressing enabled\n");
318 } else {
319 esas2r_log(ESAS2R_LOG_CRIT,
320 "failed to set DMA mask");
321 esas2r_kill_adapter(index);
322 return 0;
323 }
324 }
325 esas2r_adapters[index] = a;
326 sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
327 esas2r_debug("new adapter %p, name %s", a, a->name);
328 spin_lock_init(&a->request_lock);
329 spin_lock_init(&a->fw_event_lock);
330 sema_init(&a->fm_api_semaphore, 1);
331 sema_init(&a->fs_api_semaphore, 1);
332 sema_init(&a->nvram_semaphore, 1);
333
334 esas2r_fw_event_off(a);
335 snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
336 a->index);
337 a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
338
339 init_waitqueue_head(&a->buffered_ioctl_waiter);
340 init_waitqueue_head(&a->nvram_waiter);
341 init_waitqueue_head(&a->fm_api_waiter);
342 init_waitqueue_head(&a->fs_api_waiter);
343 init_waitqueue_head(&a->vda_waiter);
344
345 INIT_LIST_HEAD(&a->general_req.req_list);
346 INIT_LIST_HEAD(&a->active_list);
347 INIT_LIST_HEAD(&a->defer_list);
348 INIT_LIST_HEAD(&a->free_sg_list_head);
349 INIT_LIST_HEAD(&a->avail_request);
350 INIT_LIST_HEAD(&a->vrq_mds_head);
351 INIT_LIST_HEAD(&a->fw_event_list);
352
353 first_request = (struct esas2r_request *)((u8 *)(a + 1));
354
355 for (last_request = first_request, i = 1; i < num_requests;
356 last_request++, i++) {
357 INIT_LIST_HEAD(&last_request->req_list);
358 list_add_tail(&last_request->comp_list, &a->avail_request);
359 if (!alloc_vda_req(a, last_request)) {
360 esas2r_log(ESAS2R_LOG_CRIT,
361 "failed to allocate a VDA request!");
362 esas2r_kill_adapter(index);
363 return 0;
364 }
365 }
366
367 esas2r_debug("requests: %p to %p (%d, %d)", first_request,
368 last_request,
369 sizeof(*first_request),
370 num_requests);
371
372 if (esas2r_map_regions(a) != 0) {
373 esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
374 esas2r_kill_adapter(index);
375 return 0;
376 }
377
378 a->index = index;
379
380 /* interrupts will be disabled until we are done with init */
381 atomic_inc(&a->dis_ints_cnt);
382 atomic_inc(&a->disable_cnt);
383 a->flags |= AF_CHPRST_PENDING
384 | AF_DISC_PENDING
385 | AF_FIRST_INIT
386 | AF_LEGACY_SGE_MODE;
387
388 a->init_msg = ESAS2R_INIT_MSG_START;
389 a->max_vdareq_size = 128;
390 a->build_sgl = esas2r_build_sg_list_sge;
391
392 esas2r_setup_interrupts(a, interrupt_mode);
393
394 a->uncached_size = esas2r_get_uncached_size(a);
395 a->uncached = dma_alloc_coherent(&pcid->dev,
396 (size_t)a->uncached_size,
397 (dma_addr_t *)&bus_addr,
398 GFP_KERNEL);
399 if (a->uncached == NULL) {
400 esas2r_log(ESAS2R_LOG_CRIT,
401 "failed to allocate %d bytes of consistent memory!",
402 a->uncached_size);
403 esas2r_kill_adapter(index);
404 return 0;
405 }
406
407 a->uncached_phys = bus_addr;
408
409 esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
410 a->uncached_size,
411 a->uncached,
412 upper_32_bits(bus_addr),
413 lower_32_bits(bus_addr));
414 memset(a->uncached, 0, a->uncached_size);
415 next_uncached = a->uncached;
416
417 if (!esas2r_init_adapter_struct(a,
418 &next_uncached)) {
419 esas2r_log(ESAS2R_LOG_CRIT,
420 "failed to initialize adapter structure (2)!");
421 esas2r_kill_adapter(index);
422 return 0;
423 }
424
425 tasklet_init(&a->tasklet,
426 esas2r_adapter_tasklet,
427 (unsigned long)a);
428
429 /*
430 * Disable chip interrupts to prevent spurious interrupts
431 * until we claim the IRQ.
432 */
433 esas2r_disable_chip_interrupts(a);
434 esas2r_check_adapter(a);
435
436 if (!esas2r_init_adapter_hw(a, true))
437 esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
438 else
439 esas2r_debug("esas2r_init_adapter ok");
440
441 esas2r_claim_interrupts(a);
442
443 if (a->flags2 & AF2_IRQ_CLAIMED)
444 esas2r_enable_chip_interrupts(a);
445
446 esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE);
447 if (!(a->flags & AF_DEGRADED_MODE))
448 esas2r_kickoff_timer(a);
449 esas2r_debug("esas2r_init_adapter done for %p (%d)",
450 a, a->disable_cnt);
451
452 return 1;
453}
454
/*
 * Power down the adapter and release its resources.  When
 * power_management is non-zero this is a suspend-style shutdown (the
 * timer and tasklet are left running); otherwise everything — sysfs
 * files, IRQ, MSI, DMA memory and PCI mappings — is torn down.
 */
static void esas2r_adapter_power_down(struct esas2r_adapter *a,
				      int power_management)
{
	struct esas2r_mem_desc *memdesc, *next;

	if ((a->flags2 & AF2_INIT_DONE)
	    && (!(a->flags & AF_DEGRADED_MODE))) {
		if (!power_management) {
			del_timer_sync(&a->timer);
			tasklet_kill(&a->tasklet);
		}
		esas2r_power_down(a);

		/*
		 * There are versions of firmware that do not handle the sync
		 * cache command correctly.  Stall here to ensure that the
		 * cache is lazily flushed.
		 */
		mdelay(500);
		esas2r_debug("chip halted");
	}

	/* Remove sysfs binary files */
	if (a->sysfs_fw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
		a->sysfs_fw_created = 0;
	}

	if (a->sysfs_fs_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
		a->sysfs_fs_created = 0;
	}

	if (a->sysfs_vda_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
		a->sysfs_vda_created = 0;
	}

	if (a->sysfs_hw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
		a->sysfs_hw_created = 0;
	}

	if (a->sysfs_live_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_live_nvram);
		a->sysfs_live_nvram_created = 0;
	}

	if (a->sysfs_default_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_default_nvram);
		a->sysfs_default_nvram_created = 0;
	}

	/* Clean up interrupts */
	if (a->flags2 & AF2_IRQ_CLAIMED) {
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "free_irq(%d) called", a->pcid->irq);

		free_irq(a->pcid->irq, a);
		esas2r_debug("IRQ released");
		esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED);
	}

	if (a->flags2 & AF2_MSI_ENABLED) {
		pci_disable_msi(a->pcid);
		esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED);
		esas2r_debug("MSI disabled");
	}

	if (a->inbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->inbound_list_md);

	if (a->outbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->outbound_list_md);

	/*
	 * The SG-list descriptors themselves are not kfree'd here —
	 * presumably they live in the sg_list_mds array freed below;
	 * confirm in esas2r_init_adapter_struct.
	 */
	list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
				 next_desc) {
		esas2r_initmem_free(a, memdesc);
	}

	/* Following frees everything allocated via alloc_vda_req */
	list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
		esas2r_initmem_free(a, memdesc);
		list_del(&memdesc->next_desc);
		kfree(memdesc);
	}

	kfree(a->first_ae_req);
	a->first_ae_req = NULL;

	kfree(a->sg_list_mds);
	a->sg_list_mds = NULL;

	kfree(a->req_table);
	a->req_table = NULL;

	if (a->regs) {
		esas2r_unmap_regions(a);
		a->regs = NULL;
		a->data_window = NULL;
		esas2r_debug("regions unmapped");
	}
}
561
/*
 * Release/free allocated resources for specified adapters.
 *
 * Tears down adapter slot 'i' completely: quiesces firmware and hardware,
 * frees every DMA area and driver allocation, disables the PCI device and
 * finally unregisters/puts the SCSI host.  Safe to call for an empty slot
 * (esas2r_adapters[i] == NULL) — it simply does nothing.
 */
void esas2r_kill_adapter(int i)
{
	struct esas2r_adapter *a = esas2r_adapters[i];

	if (a) {
		unsigned long flags;
		struct workqueue_struct *wq;
		esas2r_debug("killing adapter %p [%d] ", a, i);
		/* Stop firmware event delivery, then power the chip down
		 * (second arg 0 = full shutdown, not a PM transition). */
		esas2r_fw_event_off(a);
		esas2r_adapter_power_down(a, 0);
		/*
		 * The buffered-ioctl DMA area is shared module-wide; only
		 * the adapter whose PCI device originally allocated it is
		 * allowed to free it.
		 */
		if (esas2r_buffered_ioctl &&
		    (a->pcid == esas2r_buffered_ioctl_pcid)) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);
			esas2r_buffered_ioctl = NULL;
		}

		if (a->vda_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)VDA_MAX_BUFFER_SIZE,
					  a->vda_buffer,
					  (dma_addr_t)a->ppvda_buffer);
			a->vda_buffer = NULL;
		}
		if (a->fs_api_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->fs_api_buffer_size,
					  a->fs_api_buffer,
					  (dma_addr_t)a->ppfs_api_buffer);
			a->fs_api_buffer = NULL;
		}

		kfree(a->local_atto_ioctl);
		a->local_atto_ioctl = NULL;

		/*
		 * Detach the workqueue pointer under the lock so no other
		 * path can queue to it, then destroy it outside the lock
		 * (destroy_workqueue may sleep).
		 */
		spin_lock_irqsave(&a->fw_event_lock, flags);
		wq = a->fw_event_q;
		a->fw_event_q = NULL;
		spin_unlock_irqrestore(&a->fw_event_lock, flags);
		if (wq)
			destroy_workqueue(wq);

		if (a->uncached) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->uncached_size,
					  a->uncached,
					  (dma_addr_t)a->uncached_phys);
			a->uncached = NULL;
			esas2r_debug("uncached area freed");
		}

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_disable_device() called.  msix_enabled: %d "
			       "msi_enabled: %d irq: %d pin: %d",
			       a->pcid->msix_enabled,
			       a->pcid->msi_enabled,
			       a->pcid->irq,
			       a->pcid->pin);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "before pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		pci_disable_device(a->pcid);
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "after pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_set_drv_data(%p, NULL) called",
			       a->pcid);

		pci_set_drvdata(a->pcid, NULL);
		a->esas2r_adapters[i] = NULL;

		/*
		 * Only adapters that completed initialization registered a
		 * SCSI host; unregister and drop the final reference here.
		 * Note: 'a' lives inside the Scsi_Host's hostdata, so it
		 * must not be touched after scsi_host_put().
		 */
		if (a->flags2 & AF2_INIT_DONE) {
			esas2r_lock_clear_flags(&a->flags2,
						AF2_INIT_DONE);

			esas2r_lock_set_flags(&a->flags,
					      AF_DEGRADED_MODE);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_remove_host() called");

			scsi_remove_host(a->host);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_host_put() called");

			scsi_host_put(a->host);
		}
	}
}
665
666int esas2r_cleanup(struct Scsi_Host *host)
667{
668 struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
669 int index;
670
671 if (host == NULL) {
672 int i;
673
674 esas2r_debug("esas2r_cleanup everything");
675 for (i = 0; i < MAX_ADAPTERS; i++)
676 esas2r_kill_adapter(i);
677 return -1;
678 }
679
680 esas2r_debug("esas2r_cleanup called for host %p", host);
681 index = a->index;
682 esas2r_kill_adapter(index);
683 return index;
684}
685
/*
 * Legacy PCI power-management suspend callback.
 *
 * Quiesces the adapter, saves PCI config space, disables the device and
 * transitions it to the state chosen by the PCI core.  Returns 0 on
 * success, -ENODEV when no adapter is attached to the host.
 */
int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	u32 device_state;
	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;

	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
	if (!a)
		return -ENODEV;

	/* 1 = power-management path: the timer and tasklet stay alive so
	 * the adapter can be revived in esas2r_resume(). */
	esas2r_adapter_power_down(a, 1);
	device_state = pci_choose_state(pdev, state);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_save_state() called");
	pci_save_state(pdev);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_disable_device() called");
	pci_disable_device(pdev);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_set_power_state() called");
	pci_set_power_state(pdev, device_state);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
	return 0;
}
710
/*
 * Legacy PCI power-management resume callback.
 *
 * Restores PCI state, re-enables the device, re-maps BARs, re-powers the
 * firmware and re-claims the IRQ.  Returns 0 on success or a negative
 * errno; every exit path goes through error_exit so the result is logged.
 */
int esas2r_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
	int rez;

	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_set_power_state(PCI_D0) "
		       "called");
	pci_set_power_state(pdev, PCI_D0);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_enable_wake(PCI_D0, 0) "
		       "called");
	pci_enable_wake(pdev, PCI_D0, 0);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_restore_state() called");
	pci_restore_state(pdev);
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "pci_enable_device() called");
	/* NOTE(review): rez is not checked before pci_set_master(); a
	 * failed enable is only reported via the final log/return. */
	rez = pci_enable_device(pdev);
	pci_set_master(pdev);

	if (!a) {
		rez = -ENODEV;
		goto error_exit;
	}

	if (esas2r_map_regions(a) != 0) {
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
		rez = -ENOMEM;
		goto error_exit;
	}

	/* Set up interupt mode */
	esas2r_setup_interrupts(a, a->intr_mode);

	/*
	 * Disable chip interrupts to prevent spurious interrupts until we
	 * claim the IRQ.
	 */
	esas2r_disable_chip_interrupts(a);
	if (!esas2r_power_up(a, true)) {
		esas2r_debug("yikes, esas2r_power_up failed");
		rez = -ENOMEM;
		goto error_exit;
	}

	esas2r_claim_interrupts(a);

	if (a->flags2 & AF2_IRQ_CLAIMED) {
		/*
		 * Now that system interrupt(s) are claimed, we can enable
		 * chip interrupts.
		 */
		esas2r_enable_chip_interrupts(a);
		esas2r_kickoff_timer(a);
	} else {
		esas2r_debug("yikes, unable to claim IRQ");
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
		rez = -ENOMEM;
		goto error_exit;
	}

error_exit:
	esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
		       rez);
	return rez;
}
780
/*
 * Put the adapter into degraded mode and log the reason.
 *
 * Always returns false so failure paths can simply
 * "return esas2r_set_degraded_mode(a, ...);".
 */
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{
	esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
	esas2r_log(ESAS2R_LOG_CRIT,
		   "setting adapter to degraded mode: %s\n", error_str);
	return false;
}
788
789u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
790{
791 return sizeof(struct esas2r_sas_nvram)
792 + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
793 + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
794 + 8
795 + (num_sg_lists * (u16)sgl_page_size)
796 + ALIGN((num_requests + num_ae_requests + 1 +
797 ESAS2R_LIST_EXTRA) *
798 sizeof(struct esas2r_inbound_list_source_entry),
799 8)
800 + ALIGN((num_requests + num_ae_requests + 1 +
801 ESAS2R_LIST_EXTRA) *
802 sizeof(struct atto_vda_ob_rsp), 8)
803 + 256; /* VDA request and buffer align */
804}
805
806static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
807{
808 int pcie_cap_reg;
809
810 pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
811 if (0xffff && pcie_cap_reg) {
812 u16 devcontrol;
813
814 pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
815 &devcontrol);
816
817 if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
818 esas2r_log(ESAS2R_LOG_INFO,
819 "max read request size > 512B");
820
821 devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
822 devcontrol |= 0x2000;
823 pci_write_config_word(a->pcid,
824 pcie_cap_reg + PCI_EXP_DEVCTL,
825 devcontrol);
826 }
827 }
828}
829
830/*
831 * Determine the organization of the uncached data area and
832 * finish initializing the adapter structure
833 */
834bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
835 void **uncached_area)
836{
837 u32 i;
838 u8 *high;
839 struct esas2r_inbound_list_source_entry *element;
840 struct esas2r_request *rq;
841 struct esas2r_mem_desc *sgl;
842
843 spin_lock_init(&a->sg_list_lock);
844 spin_lock_init(&a->mem_lock);
845 spin_lock_init(&a->queue_lock);
846
847 a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
848
849 if (!alloc_vda_req(a, &a->general_req)) {
850 esas2r_hdebug(
851 "failed to allocate a VDA request for the general req!");
852 return false;
853 }
854
855 /* allocate requests for asynchronous events */
856 a->first_ae_req =
857 kzalloc(num_ae_requests * sizeof(struct esas2r_request),
858 GFP_KERNEL);
859
860 if (a->first_ae_req == NULL) {
861 esas2r_log(ESAS2R_LOG_CRIT,
862 "failed to allocate memory for asynchronous events");
863 return false;
864 }
865
866 /* allocate the S/G list memory descriptors */
867 a->sg_list_mds = kzalloc(
868 num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);
869
870 if (a->sg_list_mds == NULL) {
871 esas2r_log(ESAS2R_LOG_CRIT,
872 "failed to allocate memory for s/g list descriptors");
873 return false;
874 }
875
876 /* allocate the request table */
877 a->req_table =
878 kzalloc((num_requests + num_ae_requests +
879 1) * sizeof(struct esas2r_request *), GFP_KERNEL);
880
881 if (a->req_table == NULL) {
882 esas2r_log(ESAS2R_LOG_CRIT,
883 "failed to allocate memory for the request table");
884 return false;
885 }
886
887 /* initialize PCI configuration space */
888 esas2r_init_pci_cfg_space(a);
889
890 /*
891 * the thunder_stream boards all have a serial flash part that has a
892 * different base address on the AHB bus.
893 */
894 if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
895 && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
896 a->flags2 |= AF2_THUNDERBOLT;
897
898 if (a->flags2 & AF2_THUNDERBOLT)
899 a->flags2 |= AF2_SERIAL_FLASH;
900
901 if (a->pcid->subsystem_device == ATTO_TLSH_1068)
902 a->flags2 |= AF2_THUNDERLINK;
903
904 /* Uncached Area */
905 high = (u8 *)*uncached_area;
906
907 /* initialize the scatter/gather table pages */
908
909 for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
910 sgl->size = sgl_page_size;
911
912 list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
913
914 if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
915 /* Allow the driver to load if the minimum count met. */
916 if (i < NUM_SGL_MIN)
917 return false;
918 break;
919 }
920 }
921
922 /* compute the size of the lists */
923 a->list_size = num_requests + ESAS2R_LIST_EXTRA;
924
925 /* allocate the inbound list */
926 a->inbound_list_md.size = a->list_size *
927 sizeof(struct
928 esas2r_inbound_list_source_entry);
929
930 if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
931 esas2r_hdebug("failed to allocate IB list");
932 return false;
933 }
934
935 /* allocate the outbound list */
936 a->outbound_list_md.size = a->list_size *
937 sizeof(struct atto_vda_ob_rsp);
938
939 if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
940 ESAS2R_LIST_ALIGN)) {
941 esas2r_hdebug("failed to allocate IB list");
942 return false;
943 }
944
945 /* allocate the NVRAM structure */
946 a->nvram = (struct esas2r_sas_nvram *)high;
947 high += sizeof(struct esas2r_sas_nvram);
948
949 /* allocate the discovery buffer */
950 a->disc_buffer = high;
951 high += ESAS2R_DISC_BUF_LEN;
952 high = PTR_ALIGN(high, 8);
953
954 /* allocate the outbound list copy pointer */
955 a->outbound_copy = (u32 volatile *)high;
956 high += sizeof(u32);
957
958 if (!(a->flags & AF_NVR_VALID))
959 esas2r_nvram_set_defaults(a);
960
961 /* update the caller's uncached memory area pointer */
962 *uncached_area = (void *)high;
963
964 /* initialize the allocated memory */
965 if (a->flags & AF_FIRST_INIT) {
966 memset(a->req_table, 0,
967 (num_requests + num_ae_requests +
968 1) * sizeof(struct esas2r_request *));
969
970 esas2r_targ_db_initialize(a);
971
972 /* prime parts of the inbound list */
973 element =
974 (struct esas2r_inbound_list_source_entry *)a->
975 inbound_list_md.
976 virt_addr;
977
978 for (i = 0; i < a->list_size; i++) {
979 element->address = 0;
980 element->reserved = 0;
981 element->length = cpu_to_le32(HWILSE_INTERFACE_F0
982 | (sizeof(union
983 atto_vda_req)
984 /
985 sizeof(u32)));
986 element++;
987 }
988
989 /* init the AE requests */
990 for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
991 i++) {
992 INIT_LIST_HEAD(&rq->req_list);
993 if (!alloc_vda_req(a, rq)) {
994 esas2r_hdebug(
995 "failed to allocate a VDA request!");
996 return false;
997 }
998
999 esas2r_rq_init_request(rq, a);
1000
1001 /* override the completion function */
1002 rq->comp_cb = esas2r_ae_complete;
1003 }
1004 }
1005
1006 return true;
1007}
1008
/*
 * This code will verify that the chip is operational.
 *
 * Performs the firmware-ready handshake over the doorbell registers,
 * selects the SGE or PRD S/G format from the reported firmware API
 * version, then programs the inbound/outbound communication list
 * registers and waits for the firmware to enable them.  On any failure
 * the adapter is dropped into degraded mode (returns false).
 */
bool esas2r_check_adapter(struct esas2r_adapter *a)
{
	u32 starttime;
	u32 doorbell;
	u64 ppaddr;
	u32 dw;

	/*
	 * if the chip reset detected flag is set, we can bypass a bunch of
	 * stuff.
	 */
	if (a->flags & AF_CHPRST_DETECTED)
		goto skip_chip_reset;

	/*
	 * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver
	 * may have left them enabled or we may be recovering from a fault.
	 */
	esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
	esas2r_flush_register_dword(a, MU_INT_MASK_OUT);

	/*
	 * wait for the firmware to become ready by forcing an interrupt and
	 * waiting for a response.
	 */
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		esas2r_force_interrupt(a);
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		/* all-ones reads back when the chip is not decoding PCI
		 * accesses (same test as esas2r_is_adapter_present) */
		if (doorbell == 0xFFFFFFFF) {
			/*
			 * Give the firmware up to two seconds to enable
			 * register access after a reset.
			 */
			if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
				return esas2r_set_degraded_mode(a,
								"unable to access registers");
		} else if (doorbell & DRBL_FORCE_INT) {
			u32 ver = (doorbell & DRBL_FW_VER_MSK);

			/*
			 * This driver supports version 0 and version 1 of
			 * the API
			 */
			/* ack the doorbell by writing back what we read */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);

			if (ver == DRBL_FW_VER_0) {
				esas2r_lock_set_flags(&a->flags,
						      AF_LEGACY_SGE_MODE);

				a->max_vdareq_size = 128;
				a->build_sgl = esas2r_build_sg_list_sge;
			} else if (ver == DRBL_FW_VER_1) {
				esas2r_lock_clear_flags(&a->flags,
							AF_LEGACY_SGE_MODE);

				a->max_vdareq_size = 1024;
				a->build_sgl = esas2r_build_sg_list_prd;
			} else {
				return esas2r_set_degraded_mode(a,
								"unknown firmware version");
			}
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		/* overall firmware-start deadline: 3 minutes */
		if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
			esas2r_hdebug("FW ready TMO");
			esas2r_bugon();

			return esas2r_set_degraded_mode(a,
							"firmware start has timed out");
		}
	}

	/* purge any asynchronous events since we will repost them later */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_DOWN) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(50));

		/* best-effort: fall through after 3s even without the ack */
		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug("timeout waiting for interface down");
			break;
		}
	}
skip_chip_reset:
	/*
	 * first things first, before we go changing any of these registers
	 * disable the communication lists.
	 */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~MU_ILC_ENABLE;
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~MU_OLC_ENABLE;
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/* configure the communication list addresses */
	ppaddr = a->inbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	ppaddr = a->outbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	/* outbound copy pointer lives inside the uncached area */
	ppaddr = a->uncached_phys +
		 ((u8 *)a->outbound_copy - a->uncached);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
				    upper_32_bits(ppaddr));

	/* reset the read and write pointers */
	*a->outbound_copy =
		a->last_write =
			a->last_read = a->list_size - 1;
	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
	/* NOTE(review): all four pointers are seeded from last_write;
	 * last_read holds the same value here, so this is equivalent,
	 * but confirm intent before changing. */
	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
				    MU_OLW_TOGGLE | a->last_write);

	/* configure the interface select fields */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
	dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
	esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
				    (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
	dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
	esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
				    (dw | MU_OLIC_LIST_F0 |
				     MU_OLIC_SOURCE_DDR));

	/* finish configuring the communication lists */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
	dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
	      | (a->list_size << MU_ILC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
	dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/*
	 * notify the firmware that we're done setting up the communication
	 * list registers.  wait here until the firmware is done configuring
	 * its lists.  it will signal that it is done by enabling the lists.
	 */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_INIT) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug(
				"timeout waiting for communication list init");
			esas2r_bugon();
			return esas2r_set_degraded_mode(a,
							"timeout waiting for communication list init");
		}
	}

	/*
	 * flag whether the firmware supports the power down doorbell.  we
	 * determine this by reading the inbound doorbell enable mask.
	 */
	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
	if (doorbell & DRBL_POWER_DOWN)
		esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
	else
		esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);

	/*
	 * enable assertion of outbound queue and doorbell interrupts in the
	 * main interrupt cause register.
	 */
	esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
	esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
	return true;
}
1218
/*
 * Process the initialization message just completed and format the next one.
 *
 * Implements a small state machine driven by a->init_msg:
 *   START/REINIT -> INIT -> GET_INIT -> done.
 * Returns true when 'rq' has been formatted with the next message to
 * send, false when the sequence is finished (rq->req_stat forced to
 * RS_SUCCESS for the caller).
 */
static bool esas2r_format_init_msg(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	u32 msg = a->init_msg;
	struct atto_vda_cfg_init *ci;

	a->init_msg = 0;

	switch (msg) {
	case ESAS2R_INIT_MSG_START:
	case ESAS2R_INIT_MSG_REINIT:
	{
		struct timeval now;
		do_gettimeofday(&now);
		esas2r_hdebug("CFG init");
		esas2r_build_cfg_req(a,
				     rq,
				     VDA_CFG_INIT,
				     0,
				     NULL);
		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
		/* NOTE(review): sgl_page_size and epoch_time are stored
		 * without cpu_to_le32() while other fields in this file use
		 * explicit endian conversion — confirm wire format. */
		ci->sgl_page_size = sgl_page_size;
		ci->epoch_time = now.tv_sec;
		/* init may legitimately fail on old firmware */
		rq->flags |= RF_FAILURE_OK;
		a->init_msg = ESAS2R_INIT_MSG_INIT;
		break;
	}

	case ESAS2R_INIT_MSG_INIT:
		if (rq->req_stat == RS_SUCCESS) {
			u32 major;
			u32 minor;

			/* assemble fw_version: low 16 bits = VDA version,
			 * upper bytes = firmware release major/minor */
			a->fw_version = le16_to_cpu(
				rq->func_rsp.cfg_rsp.vda_version);
			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
			major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
			minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
			a->fw_version += (major << 16) + (minor << 24);
		} else {
			esas2r_hdebug("FAILED");
		}

		/*
		 * the 2.71 and earlier releases of R6xx firmware did not error
		 * unsupported config requests correctly.
		 */

		/* NOTE(review): both sides of this compare go through
		 * be32_to_cpu, including the constant — verify the intended
		 * byte order of the version comparison. */
		if ((a->flags2 & AF2_THUNDERBOLT)
		    || (be32_to_cpu(a->fw_version) >
			be32_to_cpu(0x47020052))) {
			esas2r_hdebug("CFG get init");
			esas2r_build_cfg_req(a,
					     rq,
					     VDA_CFG_GET_INIT2,
					     sizeof(struct atto_vda_cfg_init),
					     NULL);

			rq->vrq->cfg.sg_list_offset = offsetof(
				struct atto_vda_cfg_req,
				data.sge);
			rq->vrq->cfg.data.prde.ctl_len =
				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
			rq->vrq->cfg.data.prde.address = cpu_to_le64(
				rq->vrq_md->phys_addr +
				sizeof(union atto_vda_req));
			rq->flags |= RF_FAILURE_OK;
			a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
			break;
		}

	/* deliberate fall-through when GET_INIT2 is skipped; the next case
	 * is guarded by the msg check so INIT processing stops here */
	case ESAS2R_INIT_MSG_GET_INIT:
		if (msg == ESAS2R_INIT_MSG_GET_INIT) {
			ci = (struct atto_vda_cfg_init *)rq->data_buf;
			if (rq->req_stat == RS_SUCCESS) {
				a->num_targets_backend =
					le32_to_cpu(ci->num_targets_backend);
				a->ioctl_tunnel =
					le32_to_cpu(ci->ioctl_tunnel);
			} else {
				esas2r_hdebug("FAILED");
			}
		}
	/* fall through */

	default:
		rq->req_stat = RS_SUCCESS;
		return false;
	}
	return true;
}
1311
1312/*
1313 * Perform initialization messages via the request queue. Messages are
1314 * performed with interrupts disabled.
1315 */
1316bool esas2r_init_msgs(struct esas2r_adapter *a)
1317{
1318 bool success = true;
1319 struct esas2r_request *rq = &a->general_req;
1320
1321 esas2r_rq_init_request(rq, a);
1322 rq->comp_cb = esas2r_dummy_complete;
1323
1324 if (a->init_msg == 0)
1325 a->init_msg = ESAS2R_INIT_MSG_REINIT;
1326
1327 while (a->init_msg) {
1328 if (esas2r_format_init_msg(a, rq)) {
1329 unsigned long flags;
1330 while (true) {
1331 spin_lock_irqsave(&a->queue_lock, flags);
1332 esas2r_start_vda_request(a, rq);
1333 spin_unlock_irqrestore(&a->queue_lock, flags);
1334 esas2r_wait_request(a, rq);
1335 if (rq->req_stat != RS_PENDING)
1336 break;
1337 }
1338 }
1339
1340 if (rq->req_stat == RS_SUCCESS
1341 || ((rq->flags & RF_FAILURE_OK)
1342 && rq->req_stat != RS_TIMEOUT))
1343 continue;
1344
1345 esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
1346 a->init_msg, rq->req_stat, rq->flags);
1347 a->init_msg = ESAS2R_INIT_MSG_START;
1348 success = false;
1349 break;
1350 }
1351
1352 esas2r_rq_destroy_request(rq, a);
1353 return success;
1354}
1355
/*
 * Initialize the adapter chip.
 *
 * Runs the firmware init-message exchange, posts the asynchronous event
 * requests, reads version/identity strings, and kicks off discovery.
 * When init_poll is true (initial load / resume), discovery is driven
 * synchronously here by simulating timer ticks; otherwise it proceeds
 * interrupt-driven.  Returns true on success; on failure the adapter
 * ends up in degraded mode.
 */
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
{
	bool rslt = false;
	struct esas2r_request *rq;
	u32 i;

	if (a->flags & AF_DEGRADED_MODE)
		goto exit;

	if (!(a->flags & AF_NVR_VALID)) {
		if (!esas2r_nvram_read_direct(a))
			esas2r_log(ESAS2R_LOG_WARN,
				   "invalid/missing NVRAM parameters");
	}

	if (!esas2r_init_msgs(a)) {
		esas2r_set_degraded_mode(a, "init messages failed");
		goto exit;
	}

	/* The firmware is ready. */
	esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE);
	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);

	/* Post all the async event requests */
	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
		esas2r_start_ae_request(a, rq);

	/* read identity strings only once (empty string = not yet read) */
	if (!a->flash_rev[0])
		esas2r_read_flash_rev(a);

	if (!a->image_type[0])
		esas2r_read_image_type(a);

	if (a->fw_version == 0)
		a->fw_rev[0] = 0;
	else
		sprintf(a->fw_rev, "%1d.%02d",
			(int)LOBYTE(HIWORD(a->fw_version)),
			(int)HIBYTE(HIWORD(a->fw_version)));

	esas2r_hdebug("firmware revision: %s", a->fw_rev);

	/* chip-reset recovery during first init: resume the original
	 * polled discovery instead of starting a new one */
	if ((a->flags & AF_CHPRST_DETECTED)
	    && (a->flags & AF_FIRST_INIT)) {
		esas2r_enable_chip_interrupts(a);
		return true;
	}

	/* initialize discovery */
	esas2r_disc_initialize(a);

	/*
	 * wait for the device wait time to expire here if requested.  this is
	 * usually requested during initial driver load and possibly when
	 * resuming from a low power state.  deferred device waiting will use
	 * interrupts.  chip reset recovery always defers device waiting to
	 * avoid being in a TASKLET too long.
	 */
	if (init_poll) {
		u32 currtime = a->disc_start_time;
		u32 nexttick = 100;
		u32 deltatime;

		/*
		 * Block Tasklets from getting scheduled and indicate this is
		 * polled discovery.
		 */
		esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED);
		esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED);

		/*
		 * Temporarily bring the disable count to zero to enable
		 * deferred processing.  Note that the count is already zero
		 * after the first initialization.
		 */
		if (a->flags & AF_FIRST_INIT)
			atomic_dec(&a->disable_cnt);

		while (a->flags & AF_DISC_PENDING) {
			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/*
			 * Determine the need for a timer tick based on the
			 * delta time between this and the last iteration of
			 * this loop.  We don't use the absolute time because
			 * then we would have to worry about when nexttick
			 * wraps and currtime hasn't yet.
			 */
			deltatime = jiffies_to_msecs(jiffies) - currtime;
			currtime += deltatime;

			/*
			 * Process any waiting discovery as long as the chip is
			 * up.  If a chip reset happens during initial polling,
			 * we have to make sure the timer tick processes the
			 * doorbell indicating the firmware is ready.
			 */
			if (!(a->flags & AF_CHPRST_PENDING))
				esas2r_disc_check_for_work(a);

			/* Simulate a timer tick. */
			if (nexttick <= deltatime) {

				/* Time for a timer tick */
				nexttick += 100;
				esas2r_timer_tick(a);
			}

			if (nexttick > deltatime)
				nexttick -= deltatime;

			/* Do any deferred processing */
			if (esas2r_is_tasklet_pending(a))
				esas2r_do_tasklet_tasks(a);

		}

		if (a->flags & AF_FIRST_INIT)
			atomic_inc(&a->disable_cnt);

		esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED);
		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
	}


	esas2r_targ_db_report_changes(a);

	/*
	 * For cases where (a) the initialization messages processing may
	 * handle an interrupt for a port event and a discovery is waiting, but
	 * we are not waiting for devices, or (b) the device wait time has been
	 * exhausted but there is still discovery pending, start any leftover
	 * discovery in interrupt driven mode.
	 */
	esas2r_disc_start_waiting(a);

	/* Enable chip interrupts */
	a->int_mask = ESAS2R_INT_STS_MASK;
	esas2r_enable_chip_interrupts(a);
	esas2r_enable_heartbeat(a);
	rslt = true;

exit:
	/*
	 * Regardless of whether initialization was successful, certain things
	 * need to get done before we exit.
	 */

	if ((a->flags & AF_CHPRST_DETECTED)
	    && (a->flags & AF_FIRST_INIT)) {
		/*
		 * Reinitialization was performed during the first
		 * initialization.  Only clear the chip reset flag so the
		 * original device polling is not cancelled.
		 */
		if (!rslt)
			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
	} else {
		/* First initialization or a subsequent re-init is complete. */
		if (!rslt) {
			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
			esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
		}


		/* Enable deferred processing after the first initialization. */
		if (a->flags & AF_FIRST_INIT) {
			esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT);

			if (atomic_dec_return(&a->disable_cnt) == 0)
				esas2r_do_deferred_processes(a);
		}
	}

	return rslt;
}
1534
/*
 * Request an OS-initiated adapter reset: flag it, run the local reset,
 * then schedule the tasklet to carry out deferred recovery work.
 */
void esas2r_reset_adapter(struct esas2r_adapter *a)
{
	esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
	esas2r_local_reset_adapter(a);
	esas2r_schedule_tasklet(a);
}
1541
/*
 * Hard-reset the chip, preserving any available firmware core dump
 * first.  No-op when the adapter has disappeared from the bus.
 */
void esas2r_reset_chip(struct esas2r_adapter *a)
{
	if (!esas2r_is_adapter_present(a))
		return;

	/*
	 * Before we reset the chip, save off the VDA core dump.  The VDA core
	 * dump is located in the upper 512KB of the onchip SRAM.  Make sure
	 * to not overwrite a previous crash that was saved.
	 */
	if ((a->flags2 & AF2_COREDUMP_AVAIL)
	    && !(a->flags2 & AF2_COREDUMP_SAVED)
	    && a->fw_coredump_buff) {
		esas2r_read_mem_block(a,
				      a->fw_coredump_buff,
				      MW_DATA_ADDR_SRAM + 0x80000,
				      ESAS2R_FWCOREDUMP_SZ);

		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED);
	}

	esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL);

	/* Reset the chip (B2 silicon uses a different register/value) */
	if (a->pcid->revision == MVR_FREY_B2)
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
					    MU_CTL_IN_FULL_RST2);
	else
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
					    MU_CTL_IN_FULL_RST);


	/* Stall a little while to let the reset condition clear */
	mdelay(10);
}
1577
1578static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
1579{
1580 u32 starttime;
1581 u32 doorbell;
1582
1583 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
1584 starttime = jiffies_to_msecs(jiffies);
1585
1586 while (true) {
1587 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1588 if (doorbell & DRBL_POWER_DOWN) {
1589 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1590 doorbell);
1591 break;
1592 }
1593
1594 schedule_timeout_interruptible(msecs_to_jiffies(100));
1595
1596 if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
1597 esas2r_hdebug("Timeout waiting for power down");
1598 break;
1599 }
1600 }
1601}
1602
/*
 * Perform power management processing including managing device states, adapter
 * states, interrupts, and I/O.
 *
 * Quiesces the firmware interface (when not degraded), suspends I/O by
 * faking a pending chip reset, and removes all target devices.  The
 * adapter is revived later by esas2r_power_up().
 */
void esas2r_power_down(struct esas2r_adapter *a)
{
	esas2r_lock_set_flags(&a->flags, AF_POWER_MGT);
	esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN);

	if (!(a->flags & AF_DEGRADED_MODE)) {
		u32 starttime;
		u32 doorbell;

		/*
		 * We are currently running OK and will be reinitializing later.
		 * increment the disable count to coordinate with
		 * esas2r_init_adapter.  We don't have to do this in degraded
		 * mode since we never enabled interrupts in the first place.
		 */
		esas2r_disable_chip_interrupts(a);
		esas2r_disable_heartbeat(a);

		/* wait for any VDA activity to clear before continuing */
		esas2r_write_register_dword(a, MU_DOORBELL_IN,
					    DRBL_MSG_IFC_DOWN);
		starttime = jiffies_to_msecs(jiffies);

		/* poll every 100 ms for the interface-down ack, 3 s max */
		while (true) {
			doorbell =
				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
			if (doorbell & DRBL_MSG_IFC_DOWN) {
				esas2r_write_register_dword(a, MU_DOORBELL_OUT,
							    doorbell);
				break;
			}

			schedule_timeout_interruptible(msecs_to_jiffies(100));

			if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
				esas2r_hdebug(
					"timeout waiting for interface down");
				break;
			}
		}

		/*
		 * For versions of firmware that support it tell them the driver
		 * is powering down.
		 */
		if (a->flags2 & AF2_VDA_POWER_DOWN)
			esas2r_power_down_notify_firmware(a);
	}

	/* Suspend I/O processing. */
	esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
	esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
	esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);

	esas2r_process_adapter_reset(a);

	/* Remove devices now that I/O is cleaned up. */
	a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
	esas2r_targ_db_remove_all(a, false);
}
1667
/*
 * Perform power management processing including managing device states, adapter
 * states, interrupts, and I/O.
 *
 * Counterpart of esas2r_power_down(): re-verifies the chip, re-runs
 * hardware initialization (polled when init_poll is true) and notifies
 * upper layers with a reset AEN.  Returns true when the adapter came
 * back fully operational.
 */
bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
{
	bool ret;

	esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN);
	esas2r_init_pci_cfg_space(a);
	/* treat this like a first init so deferred processing is gated
	 * until esas2r_init_adapter_hw finishes */
	esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT);
	atomic_inc(&a->disable_cnt);

	/* reinitialize the adapter */
	ret = esas2r_check_adapter(a);
	if (!esas2r_init_adapter_hw(a, init_poll))
		ret = false;

	/* send the reset asynchronous event */
	esas2r_send_reset_ae(a, true);

	/* clear this flag after initialization. */
	esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT);
	return ret;
}
1693
1694bool esas2r_is_adapter_present(struct esas2r_adapter *a)
1695{
1696 if (a->flags & AF_NOT_PRESENT)
1697 return false;
1698
1699 if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
1700 esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT);
1701
1702 return false;
1703 }
1704 return true;
1705}
1706
1707const char *esas2r_get_model_name(struct esas2r_adapter *a)
1708{
1709 switch (a->pcid->subsystem_device) {
1710 case ATTO_ESAS_R680:
1711 return "ATTO ExpressSAS R680";
1712
1713 case ATTO_ESAS_R608:
1714 return "ATTO ExpressSAS R608";
1715
1716 case ATTO_ESAS_R60F:
1717 return "ATTO ExpressSAS R60F";
1718
1719 case ATTO_ESAS_R6F0:
1720 return "ATTO ExpressSAS R6F0";
1721
1722 case ATTO_ESAS_R644:
1723 return "ATTO ExpressSAS R644";
1724
1725 case ATTO_ESAS_R648:
1726 return "ATTO ExpressSAS R648";
1727
1728 case ATTO_TSSC_3808:
1729 return "ATTO ThunderStream SC 3808D";
1730
1731 case ATTO_TSSC_3808E:
1732 return "ATTO ThunderStream SC 3808E";
1733
1734 case ATTO_TLSH_1068:
1735 return "ATTO ThunderLink SH 1068";
1736 }
1737
1738 return "ATTO SAS Controller";
1739}
1740
1741const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
1742{
1743 switch (a->pcid->subsystem_device) {
1744 case ATTO_ESAS_R680:
1745 return "R680";
1746
1747 case ATTO_ESAS_R608:
1748 return "R608";
1749
1750 case ATTO_ESAS_R60F:
1751 return "R60F";
1752
1753 case ATTO_ESAS_R6F0:
1754 return "R6F0";
1755
1756 case ATTO_ESAS_R644:
1757 return "R644";
1758
1759 case ATTO_ESAS_R648:
1760 return "R648";
1761
1762 case ATTO_TSSC_3808:
1763 return "SC 3808D";
1764
1765 case ATTO_TSSC_3808E:
1766 return "SC 3808E";
1767
1768 case ATTO_TLSH_1068:
1769 return "SH 1068";
1770 }
1771
1772 return "unknown";
1773}
diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c
new file mode 100644
index 000000000000..c2d4ff57c5c3
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_int.c
@@ -0,0 +1,941 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_int.c
3 * esas2r interrupt handling
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 */
8/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
9/*
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29 *
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 *
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42 */
43/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
44
45#include "esas2r.h"
46
47/* Local function prototypes */
48static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
49static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
50static void esas2r_process_bus_reset(struct esas2r_adapter *a);
51
52/*
53 * Poll the adapter for interrupts and service them.
54 * This function handles both legacy interrupts and MSI.
55 */
56void esas2r_polled_interrupt(struct esas2r_adapter *a)
57{
58 u32 intstat;
59 u32 doorbell;
60
61 esas2r_disable_chip_interrupts(a);
62
63 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
64
65 if (intstat & MU_INTSTAT_POST_OUT) {
66 /* clear the interrupt */
67
68 esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
69 MU_OLIS_INT);
70 esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
71
72 esas2r_get_outbound_responses(a);
73 }
74
75 if (intstat & MU_INTSTAT_DRBL) {
76 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
77 if (doorbell != 0)
78 esas2r_doorbell_interrupt(a, doorbell);
79 }
80
81 esas2r_enable_chip_interrupts(a);
82
83 if (atomic_read(&a->disable_cnt) == 0)
84 esas2r_do_deferred_processes(a);
85}
86
87/*
88 * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler
89 * schedules a TASKLET to process events, whereas the MSI handler just
90 * processes interrupt events directly.
91 */
92irqreturn_t esas2r_interrupt(int irq, void *dev_id)
93{
94 struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
95
96 if (!esas2r_adapter_interrupt_pending(a))
97 return IRQ_NONE;
98
99 esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING);
100 esas2r_schedule_tasklet(a);
101
102 return IRQ_HANDLED;
103}
104
/*
 * Service adapter interrupt events from tasklet context.  The status to
 * act on comes from a->int_stat (NOTE(review): presumably latched by the
 * hardirq path before the tasklet runs — confirm against the scheduling
 * code).  Handles outbound completions and doorbell events, restores the
 * interrupt mask, re-enables chip interrupts and kicks deferred work.
 */
void esas2r_adapter_interrupt(struct esas2r_adapter *a)
{
	u32 doorbell;

	if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
		/* clear the outbound-list interrupt, then harvest responses */
		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
		esas2r_get_outbound_responses(a);
	}

	if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	/* restore the full interrupt mask before re-enabling the chip */
	a->int_mask = ESAS2R_INT_STS_MASK;

	esas2r_enable_chip_interrupts(a);

	if (likely(atomic_read(&a->disable_cnt) == 0))
		esas2r_do_deferred_processes(a);
}
130
131irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
132{
133 struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
134 u32 intstat;
135 u32 doorbell;
136
137 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
138
139 if (likely(intstat & MU_INTSTAT_POST_OUT)) {
140 /* clear the interrupt */
141
142 esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
143 MU_OLIS_INT);
144 esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
145
146 esas2r_get_outbound_responses(a);
147 }
148
149 if (unlikely(intstat & MU_INTSTAT_DRBL)) {
150 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
151 if (doorbell != 0)
152 esas2r_doorbell_interrupt(a, doorbell);
153 }
154
155 /*
156 * Work around a chip bug and force a new MSI to be sent if one is
157 * still pending.
158 */
159 esas2r_disable_chip_interrupts(a);
160 esas2r_enable_chip_interrupts(a);
161
162 if (likely(atomic_read(&a->disable_cnt) == 0))
163 esas2r_do_deferred_processes(a);
164
165 esas2r_do_tasklet_tasks(a);
166
167 return 1;
168}
169
170
171
172static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
173 struct esas2r_request *rq,
174 struct atto_vda_ob_rsp *rsp)
175{
176
177 /*
178 * For I/O requests, only copy the response if an error
179 * occurred and setup a callback to do error processing.
180 */
181 if (unlikely(rq->req_stat != RS_SUCCESS)) {
182 memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
183
184 if (rq->req_stat == RS_ABORTED) {
185 if (rq->timeout > RQ_MAX_TIMEOUT)
186 rq->req_stat = RS_TIMEOUT;
187 } else if (rq->req_stat == RS_SCSI_ERROR) {
188 u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
189
190 esas2r_trace("scsistatus: %x", scsistatus);
191
192 /* Any of these are a good result. */
193 if (scsistatus == SAM_STAT_GOOD || scsistatus ==
194 SAM_STAT_CONDITION_MET || scsistatus ==
195 SAM_STAT_INTERMEDIATE || scsistatus ==
196 SAM_STAT_INTERMEDIATE_CONDITION_MET) {
197 rq->req_stat = RS_SUCCESS;
198 rq->func_rsp.scsi_rsp.scsi_stat =
199 SAM_STAT_GOOD;
200 }
201 }
202 }
203}
204
/*
 * Drain the outbound (completion) ring.  Walks entries from the last
 * position read up to the firmware's write pointer, validates each
 * response handle against the request table, records completion status
 * on the matching request, and queues completions on a local list that
 * is drained after queue_lock is dropped.
 */
static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
{
	struct atto_vda_ob_rsp *rsp;
	u32 rspput_ptr;
	u32 rspget_ptr;
	struct esas2r_request *rq;
	u32 handle;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* Get the outbound limit and pointers */
	rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
	rspget_ptr = a->last_read;

	esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);

	/* If we don't have anything to process, get out */
	if (unlikely(rspget_ptr == rspput_ptr)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_trace_exit();
		return;
	}

	/* Make sure the firmware is healthy */
	if (unlikely(rspput_ptr >= a->list_size)) {
		/* Write pointer out of range: force an adapter reset. */
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_bugon();
		esas2r_local_reset_adapter(a);
		esas2r_trace_exit();
		return;
	}

	do {
		/* Advance the read index, wrapping at the list size. */
		rspget_ptr++;

		if (rspget_ptr >= a->list_size)
			rspget_ptr = 0;

		rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
		      + rspget_ptr;

		handle = rsp->handle;

		/* Verify the handle range */
		if (unlikely(LOWORD(handle) == 0
			     || LOWORD(handle) > num_requests +
			     num_ae_requests + 1)) {
			esas2r_bugon();
			continue;
		}

		/* Get the request for this handle */
		rq = a->req_table[LOWORD(handle)];

		/* The full handle must match, not just the table index. */
		if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
			esas2r_bugon();
			continue;
		}

		list_del(&rq->req_list);

		/* Get the completion status */
		rq->req_stat = rsp->req_stat;

		esas2r_trace("handle: %x", handle);
		esas2r_trace("rq: %p", rq);
		esas2r_trace("req_status: %x", rq->req_stat);

		if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
			/* I/O requests only copy the response on error. */
			esas2r_handle_outbound_rsp_err(a, rq, rsp);
		} else {
			/*
			 * Copy the outbound completion struct for non-I/O
			 * requests.
			 */
			memcpy(&rq->func_rsp, &rsp->func_rsp,
			       sizeof(rsp->func_rsp));
		}

		/* Queue the request for completion. */
		list_add_tail(&rq->comp_list, &comp_list);

	} while (rspget_ptr != rspput_ptr);

	a->last_read = rspget_ptr;
	spin_unlock_irqrestore(&a->queue_lock, flags);

	/* Run completion callbacks without holding queue_lock. */
	esas2r_comp_list_drain(a, &comp_list);
	esas2r_trace_exit();
}
300
/*
 * Perform all deferred processes for the adapter.  Deferred processes
 * can only be done while the current interrupt disable_cnt for the
 * adapter is zero.  Completed requests on the defer queue are moved to
 * a local list and drained after queue_lock is released; pending
 * requests may be (re)started depending on the adapter state.
 */
void esas2r_do_deferred_processes(struct esas2r_adapter *a)
{
	int startreqs = 2;
	struct esas2r_request *rq;
	unsigned long flags;

	/*
	 * startreqs is used to control starting requests
	 * that are on the deferred queue
	 *  = 0 - do not start any requests
	 *  = 1 - can start discovery requests
	 *  = 2 - can start any request
	 */

	/* No starts during a chip reset or flash update; discovery-only
	 * while discovery is still pending. */
	if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING))
		startreqs = 0;
	else if (a->flags & AF_DISC_PENDING)
		startreqs = 1;

	/* Block re-entry of deferred processing while we work. */
	atomic_inc(&a->disable_cnt);

	/* Clear off the completed list to be processed later. */

	if (esas2r_is_tasklet_pending(a)) {
		/* Let the tasklet finish first; don't start anything now. */
		esas2r_schedule_tasklet(a);

		startreqs = 0;
	}

	/*
	 * If we can start requests then traverse the defer queue
	 * looking for requests to start or complete
	 */
	if (startreqs && !list_empty(&a->defer_list)) {
		LIST_HEAD(comp_list);
		struct list_head *element, *next;

		spin_lock_irqsave(&a->queue_lock, flags);

		list_for_each_safe(element, next, &a->defer_list) {
			rq = list_entry(element, struct esas2r_request,
					req_list);

			if (rq->req_stat != RS_PENDING) {
				/* Already resolved: queue for completion. */
				list_del(element);
				list_add_tail(&rq->comp_list, &comp_list);
			}
			/*
			 * Process discovery and OS requests separately.  We
			 * can't hold up discovery requests when discovery is
			 * pending.  In general, there may be different sets of
			 * conditions for starting different types of requests.
			 */
			else if (rq->req_type == RT_DISC_REQ) {
				list_del(element);
				esas2r_disc_local_start_request(a, rq);
			} else if (startreqs == 2) {
				list_del(element);
				esas2r_local_start_request(a, rq);

				/*
				 * Flashing could have been set by last local
				 * start
				 */
				if (a->flags & AF_FLASHING)
					break;
			}
		}

		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_comp_list_drain(a, &comp_list);
	}

	atomic_dec(&a->disable_cnt);
}
381
/*
 * Process an adapter reset (or one that is about to happen) by making
 * sure all outstanding requests are completed if they haven't been
 * already: aborts any active discovery, rewinds the in/out list
 * pointers, and aborts every started request on the defer queue.
 */
void esas2r_process_adapter_reset(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	unsigned long flags;
	struct esas2r_disc_context *dc;

	LIST_HEAD(comp_list);
	struct list_head *element;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* abort the active discovery, if any. */

	if (rq->interrupt_cx) {
		dc = (struct esas2r_disc_context *)rq->interrupt_cx;

		dc->disc_evt = 0;

		esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
	}

	/*
	 * just clear the interrupt callback for now.  it will be dequeued if
	 * and when we find it on the active queue and we don't want the
	 * callback called.  also set the dummy completion callback in case we
	 * were doing an I/O request.
	 */

	rq->interrupt_cx = NULL;
	rq->interrupt_cb = NULL;

	rq->comp_cb = esas2r_dummy_complete;

	/* Reset the read and write pointers */

	*a->outbound_copy =
		a->last_write =
			a->last_read = a->list_size - 1;

	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);

	/* Kill all the requests on the active list */
	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);

		/* Only abort requests the firmware actually had in flight. */
		if (rq->req_stat == RS_STARTED)
			if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
				list_add_tail(&rq->comp_list, &comp_list);
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);
	esas2r_comp_list_drain(a, &comp_list);
	esas2r_process_bus_reset(a);
	esas2r_trace_exit();
}
444
445static void esas2r_process_bus_reset(struct esas2r_adapter *a)
446{
447 struct esas2r_request *rq;
448 struct list_head *element;
449 unsigned long flags;
450
451 LIST_HEAD(comp_list);
452
453 esas2r_trace_enter();
454
455 esas2r_hdebug("reset detected");
456
457 spin_lock_irqsave(&a->queue_lock, flags);
458
459 /* kill all the requests on the deferred queue */
460 list_for_each(element, &a->defer_list) {
461 rq = list_entry(element, struct esas2r_request, req_list);
462 if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
463 list_add_tail(&rq->comp_list, &comp_list);
464 }
465
466 spin_unlock_irqrestore(&a->queue_lock, flags);
467
468 esas2r_comp_list_drain(a, &comp_list);
469
470 if (atomic_read(&a->disable_cnt) == 0)
471 esas2r_do_deferred_processes(a);
472
473 esas2r_lock_clear_flags(&a->flags, AF_OS_RESET);
474
475 esas2r_trace_exit();
476}
477
/*
 * Act on an AF_CHPRST_NEEDED request from tasklet context: either give
 * up on the adapter (hardware gone, or too many resets in too short a
 * time) or reset the chip and kick off reinitialization.
 */
static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
{

	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED);
	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
	/*
	 * Make sure we don't get attempt more than 3 resets
	 * when the uptime between resets does not exceed one
	 * minute.  This will stop any situation where there is
	 * really something wrong with the hardware.  The way
	 * this works is that we start with uptime ticks at 0.
	 * Each time we do a reset, we add 20 seconds worth to
	 * the count.  Each time a timer tick occurs, as long
	 * as a chip reset is not pending, we decrement the
	 * tick count.  If the uptime ticks ever gets to 60
	 * seconds worth, we disable the adapter from that
	 * point forward.  Three strikes, you're out.
	 */
	if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
					      ESAS2R_CHP_UPTIME_MAX)) {
		esas2r_hdebug("*** adapter disabled ***");

		/*
		 * Ok, some kind of hard failure.  Make sure we
		 * exit this loop with chip interrupts
		 * permanently disabled so we don't lock up the
		 * entire system.  Also flag degraded mode to
		 * prevent the heartbeat from trying to recover.
		 */

		esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
		esas2r_lock_set_flags(&a->flags, AF_DISABLED);
		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
		esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);

		esas2r_disable_chip_interrupts(a);
		a->int_mask = 0;
		esas2r_process_adapter_reset(a);

		esas2r_log(ESAS2R_LOG_CRIT,
			   "Adapter disabled because of hardware failure");
	} else {
		/* The return value holds the flags BEFORE the set, so
		 * AF_CHPRST_STARTED tells us if this is a repeat attempt. */
		u32 flags =
			esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);

		if (!(flags & AF_CHPRST_STARTED))
			/*
			 * Only disable interrupts if this is
			 * the first reset attempt.
			 */
			esas2r_disable_chip_interrupts(a);

		if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) &&
		    !(flags & AF_CHPRST_STARTED)) {
			/*
			 * Don't reset the chip on the first
			 * deferred power up attempt.
			 */
		} else {
			esas2r_hdebug("*** resetting chip ***");
			esas2r_reset_chip(a);
		}

		/* Kick off the reinitialization */
		a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
		a->chip_init_time = jiffies_to_msecs(jiffies);
		if (!(a->flags & AF_POWER_MGT)) {
			esas2r_process_adapter_reset(a);

			if (!(flags & AF_CHPRST_STARTED)) {
				/* Remove devices now that I/O is cleaned up. */
				a->prev_dev_cnt =
					esas2r_targ_db_get_tgt_cnt(a);
				esas2r_targ_db_remove_all(a, false);
			}
		}

		a->int_mask = 0;
	}
}
560
/*
 * Recover from a detected chip reset (AF_CHPRST_DETECTED): reinitialize
 * the chip, then send the appropriate reset/power-management async
 * event.  Loops because initialization itself may detect another reset.
 */
static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
{
	while (a->flags & AF_CHPRST_DETECTED) {
		/*
		 * Balance the enable in esas2r_initadapter_hw.
		 * Esas2r_power_down already took care of it for power
		 * management.
		 */
		if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags &
							AF_POWER_MGT))
			esas2r_disable_chip_interrupts(a);

		/* Reinitialize the chip. */
		esas2r_check_adapter(a);
		esas2r_init_adapter_hw(a, 0);

		/* Yet another reset was requested during init: start over. */
		if (a->flags & AF_CHPRST_NEEDED)
			break;

		if (a->flags & AF_POWER_MGT) {
			/* Recovery from power management. */
			if (a->flags & AF_FIRST_INIT) {
				/* Chip reset during normal power up */
				esas2r_log(ESAS2R_LOG_CRIT,
					   "The firmware was reset during a normal power-up sequence");
			} else {
				/* Deferred power up complete. */
				esas2r_lock_clear_flags(&a->flags,
							AF_POWER_MGT);
				esas2r_send_reset_ae(a, true);
			}
		} else {
			/* Recovery from online chip reset. */
			if (a->flags & AF_FIRST_INIT) {
				/* Chip reset during driver load */
			} else {
				/* Chip reset after driver load */
				esas2r_send_reset_ae(a, false);
			}

			esas2r_log(ESAS2R_LOG_CRIT,
				   "Recovering from a chip reset while the chip was online");
		}

		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED);
		esas2r_enable_chip_interrupts(a);

		/*
		 * Clear this flag last!  this indicates that the chip has been
		 * reset already during initialization.
		 */
		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED);
	}
}
615
616
/*
 * Perform deferred tasks while chip interrupts are disabled: chip
 * reset handling, bus reset issuance and recovery, port-change
 * reporting, and finally deferred request processing.
 */
void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
{
	if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
		if (a->flags & AF_CHPRST_NEEDED)
			esas2r_chip_rst_needed_during_tasklet(a);

		esas2r_handle_chip_rst_during_tasklet(a);
	}

	if (a->flags & AF_BUSRST_NEEDED) {
		esas2r_hdebug("hard resetting bus");

		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);

		/* While flashing, only mark the reset detected; otherwise
		 * ask the firmware to reset the bus via the doorbell. */
		if (a->flags & AF_FLASHING)
			esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
		else
			esas2r_write_register_dword(a, MU_DOORBELL_IN,
						    DRBL_RESET_BUS);
	}

	if (a->flags & AF_BUSRST_DETECTED) {
		esas2r_process_bus_reset(a);

		esas2r_log_dev(ESAS2R_LOG_WARN,
			       &(a->host->shost_gendev),
			       "scsi_report_bus_reset() called");

		/* Tell the SCSI midlayer about the bus reset. */
		scsi_report_bus_reset(a->host, 0);

		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);

		esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
	}

	if (a->flags & AF_PORT_CHANGE) {
		esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE);

		esas2r_targ_db_report_changes(a);
	}

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
663
/*
 * Decode and act on a non-zero outbound doorbell value: bus-reset
 * detection, heartbeat acknowledgment (DRBL_FORCE_INT), firmware panic
 * reporting, and firmware-requested adapter resets.  The doorbell bits
 * are acknowledged (written back) before processing.
 */
static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
{
	/* Skip tracing for the frequent forced-interrupt heartbeat case. */
	if (!(doorbell & DRBL_FORCE_INT)) {
		esas2r_trace_enter();
		esas2r_trace("doorbell: %x", doorbell);
	}

	/* First clear the doorbell bits */
	esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);

	if (doorbell & DRBL_RESET_BUS)
		esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);

	/* A forced interrupt is the heartbeat reply. */
	if (doorbell & DRBL_FORCE_INT)
		esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);

	if (doorbell & DRBL_PANIC_REASON_MASK) {
		esas2r_hdebug("*** Firmware Panic ***");
		esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
	}

	if (doorbell & DRBL_FW_RESET) {
		/* Firmware asked for a reset; a coredump is available. */
		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL);
		esas2r_local_reset_adapter(a);
	}

	if (!(doorbell & DRBL_FORCE_INT))
		esas2r_trace_exit();
}
693
694void esas2r_force_interrupt(struct esas2r_adapter *a)
695{
696 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
697 DRBL_DRV_VER);
698}
699
700
701static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
702 u16 target, u32 length)
703{
704 struct esas2r_target *t = a->targetdb + target;
705 u32 cplen = length;
706 unsigned long flags;
707
708 if (cplen > sizeof(t->lu_event))
709 cplen = sizeof(t->lu_event);
710
711 esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
712 esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
713
714 spin_lock_irqsave(&a->mem_lock, flags);
715
716 t->new_target_state = TS_INVALID;
717
718 if (ae->lu.dwevent & VDAAE_LU_LOST) {
719 t->new_target_state = TS_NOT_PRESENT;
720 } else {
721 switch (ae->lu.bystate) {
722 case VDAAE_LU_NOT_PRESENT:
723 case VDAAE_LU_OFFLINE:
724 case VDAAE_LU_DELETED:
725 case VDAAE_LU_FACTORY_DISABLED:
726 t->new_target_state = TS_NOT_PRESENT;
727 break;
728
729 case VDAAE_LU_ONLINE:
730 case VDAAE_LU_DEGRADED:
731 t->new_target_state = TS_PRESENT;
732 break;
733 }
734 }
735
736 if (t->new_target_state != TS_INVALID) {
737 memcpy(&t->lu_event, &ae->lu, cplen);
738
739 esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
740 }
741
742 spin_unlock_irqrestore(&a->mem_lock, flags);
743}
744
745
746
/*
 * Completion handler for the persistent async-event (AE) request.
 * Parses the variable-length list of VDA async events returned by the
 * firmware, forwards each one to the firmware event queue, performs
 * type-specific handling (RAID / LUN / disk), then requeues the AE
 * request so the next batch of events can be received.
 */
void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{
	union atto_vda_ae *ae =
		(union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
	u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
	union atto_vda_ae *last =
		(union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
				      + length);

	esas2r_trace_enter();
	esas2r_trace("length: %d", length);

	/* Sanity-check the overall response length before walking it. */
	if (length > sizeof(struct atto_vda_ae_data)
	    || (length & 3) != 0
	    || length == 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "The AE request response length (%p) is too long: %d",
			   rq, length);

		esas2r_hdebug("aereq->length (0x%x) too long", length);
		esas2r_bugon();

		/* Treat the buffer as empty so the loop below is skipped. */
		last = ae;
	}

	while (ae < last) {
		u16 target;

		esas2r_trace("ae: %p", ae);
		esas2r_trace("ae->hdr: %p", &(ae->hdr));

		length = ae->hdr.bylength;

		/* Each event must fit the remainder and be dword-aligned. */
		if (length > (u32)((u8 *)last - (u8 *)ae)
		    || (length & 3) != 0
		    || length == 0) {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "the async event length is invalid (%p): %d",
				   ae, length);

			esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
			esas2r_bugon();

			break;
		}

		/* Fix up endianness in place before anything consumes it. */
		esas2r_nuxi_ae_data(ae);

		/* Forward the raw event to the firmware event queue. */
		esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
				      sizeof(union atto_vda_ae));

		switch (ae->hdr.bytype) {
		case VDAAE_HDR_TYPE_RAID:

			if (ae->raid.dwflags & (VDAAE_GROUP_STATE
						| VDAAE_RBLD_STATE
						| VDAAE_MEMBER_CHG
						| VDAAE_PART_CHG)) {
				esas2r_log(ESAS2R_LOG_INFO,
					   "RAID event received - name:%s rebuild_state:%d group_state:%d",
					   ae->raid.acname,
					   ae->raid.byrebuild_state,
					   ae->raid.bygroup_state);
			}

			break;

		case VDAAE_HDR_TYPE_LU:
			esas2r_log(ESAS2R_LOG_INFO,
				   "LUN event received: event:%d target_id:%d LUN:%d state:%d",
				   ae->lu.dwevent,
				   ae->lu.id.tgtlun.wtarget_id,
				   ae->lu.id.tgtlun.bylun,
				   ae->lu.bystate);

			target = ae->lu.id.tgtlun.wtarget_id;

			/* Only update the target DB for in-range targets. */
			if (target < ESAS2R_MAX_TARGETS)
				esas2r_lun_event(a, ae, target, length);

			break;

		case VDAAE_HDR_TYPE_DISK:
			esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
			break;

		default:

			/* Silently ignore the rest and let the apps deal with
			 * them.
			 */

			break;
		}

		/* Advance to the next event in the packed buffer. */
		ae = (union atto_vda_ae *)((u8 *)ae + length);
	}

	/* Now requeue it. */
	esas2r_start_ae_request(a, rq);
	esas2r_trace_exit();
}
849
850/* Send an asynchronous event for a chip reset or power management. */
851void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
852{
853 struct atto_vda_ae_hdr ae;
854
855 if (pwr_mgt)
856 ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
857 else
858 ae.bytype = VDAAE_HDR_TYPE_RESET;
859
860 ae.byversion = VDAAE_HDR_VER_0;
861 ae.byflags = 0;
862 ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
863
864 if (pwr_mgt)
865 esas2r_hdebug("*** sending power management AE ***");
866 else
867 esas2r_hdebug("*** sending reset AE ***");
868
869 esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
870 sizeof(union atto_vda_ae));
871}
872
/*
 * No-op completion callback, installed where a request's normal
 * completion handling must be suppressed (see the comp_cb assignment in
 * esas2r_process_adapter_reset()).
 */
void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{}
875
/*
 * Post-process the sense data returned for a failed SCSI request.  The
 * sense bytes arrive in rq->data_buf; copy up to rq->sense_len bytes
 * into the caller's sense buffer (or point the request at data_buf when
 * none was supplied) and watch sense bytes 0x0c/0x0d (the additional
 * sense code bytes) for 0x3F/0x0E — reported-LUNs-data-has-changed — to
 * trigger a target state change.
 */
static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
				       struct esas2r_request *rq)
{
	u8 snslen, snslen2;

	snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;

	/* Clip to the caller-supplied sense buffer size. */
	if (snslen > rq->sense_len)
		snslen = rq->sense_len;

	if (snslen) {
		if (rq->sense_buf)
			memcpy(rq->sense_buf, rq->data_buf, snslen);
		else
			rq->sense_buf = (u8 *)rq->data_buf;

		/* See about possible sense data */
		if (snslen2 > 0x0c) {
			u8 *s = (u8 *)rq->data_buf;

			esas2r_trace_enter();

			/* Report LUNS data has changed */
			if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
				esas2r_trace("rq->target_id: %d",
					     rq->target_id);
				esas2r_target_state_changed(a, rq->target_id,
							    TS_LUN_CHANGE);
			}

			esas2r_trace("add_sense_key=%x", s[0x0c]);
			esas2r_trace("add_sense_qual=%x", s[0x0d]);
			esas2r_trace_exit();
		}
	}

	rq->sense_len = snslen;
}
914
915
916void esas2r_complete_request(struct esas2r_adapter *a,
917 struct esas2r_request *rq)
918{
919 if (rq->vrq->scsi.function == VDA_FUNC_FLASH
920 && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
921 esas2r_lock_clear_flags(&a->flags, AF_FLASHING);
922
923 /* See if we setup a callback to do special processing */
924
925 if (rq->interrupt_cb) {
926 (*rq->interrupt_cb)(a, rq);
927
928 if (rq->req_stat == RS_PENDING) {
929 esas2r_start_request(a, rq);
930 return;
931 }
932 }
933
934 if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
935 && unlikely(rq->req_stat != RS_SUCCESS)) {
936 esas2r_check_req_rsp_sense(a, rq);
937 esas2r_log_request_failure(a, rq);
938 }
939
940 (*rq->comp_cb)(a, rq);
941}
diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c
new file mode 100644
index 000000000000..324e2626a08b
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_io.c
@@ -0,0 +1,880 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_io.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
/*
 * Validate and submit a request to the adapter.
 *
 * Degraded or powered-down adapters fail the request immediately
 * (RS_SEL2 for SCSI, RS_DEGRADED otherwise).  SCSI requests are checked
 * against the target database; an out-of-range or unused target yields
 * RS_SEL, as does an absent target unless discovery is still pending.
 * Requests failing validation are completed right away; otherwise the
 * request is started on the hardware, or deferred if other requests are
 * already queued or a chip reset/flash/discovery is in progress.
 */
void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
{
	struct esas2r_target *t = NULL;
	struct esas2r_request *startrq = rq;
	unsigned long flags;

	if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) {
		if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
			rq->req_stat = RS_SEL2;
		else
			rq->req_stat = RS_DEGRADED;
	} else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
		t = a->targetdb + rq->target_id;

		if (unlikely(t >= a->targetdb_end
			     || !(t->flags & TF_USED))) {
			rq->req_stat = RS_SEL;
		} else {
			/* copy in the target ID. */
			rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);

			/*
			 * Test if we want to report RS_SEL for missing target.
			 * Note that if AF_DISC_PENDING is set than this will
			 * go on the defer queue.
			 */
			if (unlikely(t->target_state != TS_PRESENT
				     && !(a->flags & AF_DISC_PENDING)))
				rq->req_stat = RS_SEL;
		}
	}

	if (unlikely(rq->req_stat != RS_PENDING)) {
		esas2r_complete_request(a, rq);
		return;
	}

	esas2r_trace("rq=%p", rq);
	esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);

	if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
		esas2r_trace("rq->target_id=%d", rq->target_id);
		esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
	}

	spin_lock_irqsave(&a->queue_lock, flags);

	/* Start immediately only if nothing is deferred and no chip
	 * reset, flash operation, or discovery is pending; otherwise
	 * the defer queue preserves submission ordering. */
	if (likely(list_empty(&a->defer_list) &&
		   !(a->flags &
		     (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING))))
		esas2r_local_start_request(a, startrq);
	else
		list_add_tail(&startrq->req_list, &a->defer_list);

	spin_unlock_irqrestore(&a->queue_lock, flags);
}
102
103/*
104 * Starts the specified request. all requests have RS_PENDING set when this
105 * routine is called. The caller is usually esas2r_start_request, but
106 * esas2r_do_deferred_processes will start request that are deferred.
107 *
108 * The caller must ensure that requests can be started.
109 *
110 * esas2r_start_request will defer a request if there are already requests
111 * waiting or there is a chip reset pending. once the reset condition clears,
112 * esas2r_do_deferred_processes will call this function to start the request.
113 *
114 * When a request is started, it is placed on the active list and queued to
115 * the controller.
116 */
117void esas2r_local_start_request(struct esas2r_adapter *a,
118 struct esas2r_request *rq)
119{
120 esas2r_trace_enter();
121 esas2r_trace("rq=%p", rq);
122 esas2r_trace("rq->vrq:%p", rq->vrq);
123 esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);
124
125 if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
126 && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
127 esas2r_lock_set_flags(&a->flags, AF_FLASHING);
128
129 list_add_tail(&rq->req_list, &a->active_list);
130 esas2r_start_vda_request(a, rq);
131 esas2r_trace_exit();
132 return;
133}
134
/*
 * Queue a request to the firmware by filling the next inbound list slot
 * and publishing the new write pointer (with the toggle bit) to the
 * MU_IN_LIST_WRITE register.  The caller must have already validated
 * that the request can be started.
 */
void esas2r_start_vda_request(struct esas2r_adapter *a,
			      struct esas2r_request *rq)
{
	struct esas2r_inbound_list_source_entry *element;
	u32 dw;

	rq->req_stat = RS_STARTED;
	/*
	 * Calculate the inbound list entry location and the current state of
	 * toggle bit.
	 */
	a->last_write++;
	if (a->last_write >= a->list_size) {
		a->last_write = 0;
		/* update the toggle bit */
		if (a->flags & AF_COMM_LIST_TOGGLE)
			esas2r_lock_clear_flags(&a->flags,
						AF_COMM_LIST_TOGGLE);
		else
			esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
	}

	element =
		(struct esas2r_inbound_list_source_entry *)a->inbound_list_md.
		virt_addr
		+ a->last_write;

	/* Set the VDA request size if it was never modified */
	if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
		rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));

	element->address = cpu_to_le64(rq->vrq_md->phys_addr);
	element->length = cpu_to_le32(rq->vda_req_sz);

	/* Update the write pointer */
	dw = a->last_write;

	if (a->flags & AF_COMM_LIST_TOGGLE)
		dw |= MU_ILW_TOGGLE;

	esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
	esas2r_trace("dw:%x", dw);
	esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
}
180
181/*
182 * Build the scatter/gather list for an I/O request according to the
183 * specifications placed in the s/g context. The caller must initialize
184 * context prior to the initial call by calling esas2r_sgc_init().
185 */
186bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
187 struct esas2r_sg_context *sgc)
188{
189 struct esas2r_request *rq = sgc->first_req;
190 union atto_vda_req *vrq = rq->vrq;
191
192 while (sgc->length) {
193 u32 rem = 0;
194 u64 addr;
195 u32 len;
196
197 len = (*sgc->get_phys_addr)(sgc, &addr);
198
199 if (unlikely(len == 0))
200 return false;
201
202 /* if current length is more than what's left, stop there */
203 if (unlikely(len > sgc->length))
204 len = sgc->length;
205
206another_entry:
207 /* limit to a round number less than the maximum length */
208 if (len > SGE_LEN_MAX) {
209 /*
210 * Save the remainder of the split. Whenever we limit
211 * an entry we come back around to build entries out
212 * of the leftover. We do this to prevent multiple
213 * calls to the get_phys_addr() function for an SGE
214 * that is too large.
215 */
216 rem = len - SGE_LEN_MAX;
217 len = SGE_LEN_MAX;
218 }
219
220 /* See if we need to allocate a new SGL */
221 if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
222 u8 sgelen;
223 struct esas2r_mem_desc *sgl;
224
225 /*
226 * If no SGls are available, return failure. The
227 * caller can call us later with the current context
228 * to pick up here.
229 */
230 sgl = esas2r_alloc_sgl(a);
231
232 if (unlikely(sgl == NULL))
233 return false;
234
235 /* Calculate the length of the last SGE filled in */
236 sgelen = (u8)((u8 *)sgc->sge.a64.curr
237 - (u8 *)sgc->sge.a64.last);
238
239 /*
240 * Copy the last SGE filled in to the first entry of
241 * the new SGL to make room for the chain entry.
242 */
243 memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);
244
245 /* Figure out the new curr pointer in the new segment */
246 sgc->sge.a64.curr =
247 (struct atto_vda_sge *)((u8 *)sgl->virt_addr +
248 sgelen);
249
250 /* Set the limit pointer and build the chain entry */
251 sgc->sge.a64.limit =
252 (struct atto_vda_sge *)((u8 *)sgl->virt_addr
253 + sgl_page_size
254 - sizeof(struct
255 atto_vda_sge));
256 sgc->sge.a64.last->length = cpu_to_le32(
257 SGE_CHAIN | SGE_ADDR_64);
258 sgc->sge.a64.last->address =
259 cpu_to_le64(sgl->phys_addr);
260
261 /*
262 * Now, if there was a previous chain entry, then
263 * update it to contain the length of this segment
264 * and size of this chain. otherwise this is the
265 * first SGL, so set the chain_offset in the request.
266 */
267 if (sgc->sge.a64.chain) {
268 sgc->sge.a64.chain->length |=
269 cpu_to_le32(
270 ((u8 *)(sgc->sge.a64.
271 last + 1)
272 - (u8 *)rq->sg_table->
273 virt_addr)
274 + sizeof(struct atto_vda_sge) *
275 LOBIT(SGE_CHAIN_SZ));
276 } else {
277 vrq->scsi.chain_offset = (u8)
278 ((u8 *)sgc->
279 sge.a64.last -
280 (u8 *)vrq);
281
282 /*
283 * This is the first SGL, so set the
284 * chain_offset and the VDA request size in
285 * the request.
286 */
287 rq->vda_req_sz =
288 (vrq->scsi.chain_offset +
289 sizeof(struct atto_vda_sge) +
290 3)
291 / sizeof(u32);
292 }
293
294 /*
295 * Remember this so when we get a new SGL filled in we
296 * can update the length of this chain entry.
297 */
298 sgc->sge.a64.chain = sgc->sge.a64.last;
299
300 /* Now link the new SGL onto the primary request. */
301 list_add(&sgl->next_desc, &rq->sg_table_head);
302 }
303
304 /* Update last one filled in */
305 sgc->sge.a64.last = sgc->sge.a64.curr;
306
307 /* Build the new SGE and update the S/G context */
308 sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
309 sgc->sge.a64.curr->address = cpu_to_le32(addr);
310 sgc->sge.a64.curr++;
311 sgc->cur_offset += len;
312 sgc->length -= len;
313
314 /*
315 * Check if we previously split an entry. If so we have to
316 * pick up where we left off.
317 */
318 if (rem) {
319 addr += len;
320 len = rem;
321 rem = 0;
322 goto another_entry;
323 }
324 }
325
326 /* Mark the end of the SGL */
327 sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);
328
329 /*
330 * If there was a previous chain entry, update the length to indicate
331 * the length of this last segment.
332 */
333 if (sgc->sge.a64.chain) {
334 sgc->sge.a64.chain->length |= cpu_to_le32(
335 ((u8 *)(sgc->sge.a64.curr) -
336 (u8 *)rq->sg_table->virt_addr));
337 } else {
338 u16 reqsize;
339
340 /*
341 * The entire VDA request was not used so lets
342 * set the size of the VDA request to be DMA'd
343 */
344 reqsize =
345 ((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
346 + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);
347
348 /*
349 * Only update the request size if it is bigger than what is
350 * already there. We can come in here twice for some management
351 * commands.
352 */
353 if (reqsize > rq->vda_req_sz)
354 rq->vda_req_sz = reqsize;
355 }
356 return true;
357}
358
359
360/*
361 * Create PRD list for each I-block consumed by the command. This routine
362 * determines how much data is required from each I-block being consumed
363 * by the command. The first and last I-blocks can be partials and all of
364 * the I-blocks in between are for a full I-block of data.
365 *
366 * The interleave size is used to determine the number of bytes in the 1st
367 * I-block and the remaining I-blocks are what remeains.
368 */
static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
				  struct esas2r_sg_context *sgc)
{
	struct esas2r_request *rq = sgc->first_req;
	u64 addr;
	u32 len;
	struct esas2r_mem_desc *sgl;
	u32 numchain = 1;	/* PRD slots reserved for a chain entry */
	u32 rem = 0;		/* leftover when an entry exceeds PRD_LEN_MAX */

	while (sgc->length) {
		/* Get the next address/length pair */

		len = (*sgc->get_phys_addr)(sgc, &addr);

		if (unlikely(len == 0))
			return false;

		/* If current length is more than what's left, stop there */

		if (unlikely(len > sgc->length))
			len = sgc->length;

another_entry:
		/* Limit to a round number less than the maximum length */

		if (len > PRD_LEN_MAX) {
			/*
			 * Save the remainder of the split. whenever we limit
			 * an entry we come back around to build entries out
			 * of the leftover. We do this to prevent multiple
			 * calls to the get_phys_addr() function for an SGE
			 * that is too large.
			 */
			rem = len - PRD_LEN_MAX;
			len = PRD_LEN_MAX;
		}

		/* See if we need to allocate a new SGL */
		if (sgc->sge.prd.sge_cnt == 0) {
			if (len == sgc->length) {
				/*
				 * We only have 1 PRD entry left.
				 * It can be placed where the chain
				 * entry would have gone
				 */

				/* Build the simple SGE */
				sgc->sge.prd.curr->ctl_len = cpu_to_le32(
					PRD_DATA | len);
				sgc->sge.prd.curr->address = cpu_to_le64(addr);

				/* Adjust length related fields */
				sgc->cur_offset += len;
				sgc->length -= len;

				/* We use the reserved chain entry for data */
				numchain = 0;

				break;
			}

			if (sgc->sge.prd.chain) {
				/*
				 * Fill # of entries of current SGL in previous
				 * chain the length of this current SGL may not
				 * full.
				 */

				sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
					sgc->sge.prd.sgl_max_cnt);
			}

			/*
			 * If no SGls are available, return failure. The
			 * caller can call us later with the current context
			 * to pick up here.
			 */

			sgl = esas2r_alloc_sgl(a);

			if (unlikely(sgl == NULL))
				return false;

			/*
			 * Link the new SGL onto the chain
			 * They are in reverse order
			 */
			list_add(&sgl->next_desc, &rq->sg_table_head);

			/*
			 * An SGL was just filled in and we are starting
			 * a new SGL. Prime the chain of the ending SGL with
			 * info that points to the new SGL. The length gets
			 * filled in when the new SGL is filled or ended
			 */

			sgc->sge.prd.chain = sgc->sge.prd.curr;

			sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
			sgc->sge.prd.chain->address =
				cpu_to_le64(sgl->phys_addr);

			/*
			 * Start a new segment.
			 * Take one away and save for chain SGE
			 */

			sgc->sge.prd.curr =
				(struct atto_physical_region_description *)sgl
				->
				virt_addr;
			sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
		}

		sgc->sge.prd.sge_cnt--;
		/* Build the simple SGE */
		sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
		sgc->sge.prd.curr->address = cpu_to_le64(addr);

		/* Used another element. Point to the next one */

		sgc->sge.prd.curr++;

		/* Adjust length related fields */

		sgc->cur_offset += len;
		sgc->length -= len;

		/*
		 * Check if we previously split an entry. If so we have to
		 * pick up where we left off.
		 */

		if (rem) {
			addr += len;
			len = rem;
			rem = 0;
			goto another_entry;
		}
	}

	/* Close out the final (partially filled) SGL by recording how many
	 * entries it consumed in the previous chain entry. */
	if (!list_empty(&rq->sg_table_head)) {
		if (sgc->sge.prd.chain) {
			sgc->sge.prd.chain->ctl_len |=
				cpu_to_le32(sgc->sge.prd.sgl_max_cnt
					    - sgc->sge.prd.sge_cnt
					    - numchain);
		}
	}

	return true;
}
522
/*
 * Build PRD-format scatter/gather lists for a request (used when the
 * firmware selects PRD mode; see esas2r_handle_pending_reset()).  For
 * regular I/O to a present, non-pass-through target, the transfer is
 * split on I-block boundaries with one PRD list per I-block, built by
 * esas2r_build_prd_iblk(); all other requests get a single list for the
 * whole transfer.  Returns false if an SGL allocation failed.
 */
bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
			      struct esas2r_sg_context *sgc)
{
	struct esas2r_request *rq = sgc->first_req;
	u32 len = sgc->length;
	struct esas2r_target *t = a->targetdb + rq->target_id;
	u8 is_i_o = 0;
	u16 reqsize;
	struct atto_physical_region_description *curr_iblk_chn;
	u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];

	/*
	 * extract LBA from command so we can determine
	 * the I-Block boundary
	 */

	if (rq->vrq->scsi.function == VDA_FUNC_SCSI
	    && t->target_state == TS_PRESENT
	    && !(t->flags & TF_PASS_THRU)) {
		u32 lbalo = 0;

		switch (rq->vrq->scsi.cdb[0]) {
		case READ_16:
		case WRITE_16:
		{
			/* low 32 bits of the 64-bit big-endian LBA
			 * (CDB bytes 6-9) */
			lbalo =
				MAKEDWORD(MAKEWORD(cdb[9],
						   cdb[8]),
					  MAKEWORD(cdb[7],
						   cdb[6]));
			is_i_o = 1;
			break;
		}

		case READ_12:
		case WRITE_12:
		case READ_10:
		case WRITE_10:
		{
			/* 32-bit big-endian LBA (CDB bytes 2-5) */
			lbalo =
				MAKEDWORD(MAKEWORD(cdb[5],
						   cdb[4]),
					  MAKEWORD(cdb[3],
						   cdb[2]));
			is_i_o = 1;
			break;
		}

		case READ_6:
		case WRITE_6:
		{
			/* 21-bit LBA (low 5 bits of byte 1 plus bytes 2-3) */
			lbalo =
				MAKEDWORD(MAKEWORD(cdb[3],
						   cdb[2]),
					  MAKEWORD(cdb[1] & 0x1F,
						   0));
			is_i_o = 1;
			break;
		}

		default:
			break;
		}

		if (is_i_o) {
			u32 startlba;

			rq->vrq->scsi.iblk_cnt_prd = 0;

			/* Determine size of 1st I-block PRD list */
			startlba = t->inter_block - (lbalo & (t->inter_block -
							      1));
			sgc->length = startlba * t->block_size;

			/* Chk if the 1st iblk chain starts at base of Iblock */
			if ((lbalo & (t->inter_block - 1)) == 0)
				rq->flags |= RF_1ST_IBLK_BASE;

			if (sgc->length > len)
				sgc->length = len;
		} else {
			sgc->length = len;
		}
	} else {
		sgc->length = len;
	}

	/* get our starting chain address */

	curr_iblk_chn =
		(struct atto_physical_region_description *)sgc->sge.a64.curr;

	sgc->sge.prd.sgl_max_cnt = sgl_page_size /
				   sizeof(struct
					  atto_physical_region_description);

	/* create all of the I-block PRD lists */

	while (len) {
		sgc->sge.prd.sge_cnt = 0;
		sgc->sge.prd.chain = NULL;
		sgc->sge.prd.curr = curr_iblk_chn;

		/* increment to next I-Block */

		len -= sgc->length;

		/* go build the next I-Block PRD list */

		if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
			return false;

		curr_iblk_chn++;

		/* subsequent I-blocks are full-sized except possibly
		 * the last one */
		if (is_i_o) {
			rq->vrq->scsi.iblk_cnt_prd++;

			if (len > t->inter_byte)
				sgc->length = t->inter_byte;
			else
				sgc->length = len;
		}
	}

	/* figure out the size used of the VDA request */

	reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
		  / sizeof(u32);

	/*
	 * only update the request size if it is bigger than what is
	 * already there. we can come in here twice for some management
	 * commands.
	 */

	if (reqsize > rq->vda_req_sz)
		rq->vda_req_sz = reqsize;

	return true;
}
663
/*
 * Called from the timer tick while a chip reset is pending.  Sits out
 * the mandatory quiet period after reset, retries the reset if the chip
 * took too long, and otherwise probes the doorbell register to detect
 * firmware readiness, selecting the SGE- or PRD-based S/G list builder
 * according to the firmware API version reported in the doorbell.
 */
static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
{
	u32 delta = currtime - a->chip_init_time;

	if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
		/* Wait before accessing registers */
	} else if (delta >= ESAS2R_CHPRST_TIME) {
		/*
		 * The last reset failed so try again. Reset
		 * processing will give up after three tries.
		 */
		esas2r_local_reset_adapter(a);
	} else {
		/* We can now see if the firmware is ready */
		u32 doorbell;

		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
			/* Not ready yet (or register read failed);
			 * poke the chip again. */
			esas2r_force_interrupt(a);
		} else {
			u32 ver = (doorbell & DRBL_FW_VER_MSK);

			/* Driver supports API version 0 and 1 */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			if (ver == DRBL_FW_VER_0) {
				esas2r_lock_set_flags(&a->flags,
						      AF_CHPRST_DETECTED);
				esas2r_lock_set_flags(&a->flags,
						      AF_LEGACY_SGE_MODE);

				a->max_vdareq_size = 128;
				a->build_sgl = esas2r_build_sg_list_sge;
			} else if (ver == DRBL_FW_VER_1) {
				esas2r_lock_set_flags(&a->flags,
						      AF_CHPRST_DETECTED);
				esas2r_lock_clear_flags(&a->flags,
							AF_LEGACY_SGE_MODE);

				a->max_vdareq_size = 1024;
				a->build_sgl = esas2r_build_sg_list_prd;
			} else {
				/* Unknown API version: reset and retry. */
				esas2r_local_reset_adapter(a);
			}
		}
	}
}
711
712
713/* This function must be called once per timer tick */
/*
 * This function must be called once per timer tick.  It counts down the
 * chip uptime, drives pending chip-reset recovery, checks discovery
 * completion, runs the firmware heartbeat, and kicks deferred
 * processing when the adapter is not disabled.
 */
void esas2r_timer_tick(struct esas2r_adapter *a)
{
	u32 currtime = jiffies_to_msecs(jiffies);
	u32 deltatime = currtime - a->last_tick_time;

	a->last_tick_time = currtime;

	/* count down the uptime */
	if (a->chip_uptime
	    && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
		if (deltatime >= a->chip_uptime)
			a->chip_uptime = 0;
		else
			a->chip_uptime -= deltatime;
	}

	if (a->flags & AF_CHPRST_PENDING) {
		/* only progress reset recovery once the need/detect
		 * phases have been handled elsewhere */
		if (!(a->flags & AF_CHPRST_NEEDED)
		    && !(a->flags & AF_CHPRST_DETECTED))
			esas2r_handle_pending_reset(a, currtime);
	} else {
		if (a->flags & AF_DISC_PENDING)
			esas2r_disc_check_complete(a);

		/*
		 * Heartbeat: set AF_HEARTBEAT and force an interrupt; the
		 * flag is presumably cleared by the interrupt response path
		 * (not visible here - confirm against the ISR).  If it is
		 * still set after ESAS2R_HEARTBEAT_TIME, the firmware is
		 * considered hung and the adapter is reset.
		 */
		if (a->flags & AF_HEARTBEAT_ENB) {
			if (a->flags & AF_HEARTBEAT) {
				if ((currtime - a->heartbeat_time) >=
				    ESAS2R_HEARTBEAT_TIME) {
					esas2r_lock_clear_flags(&a->flags,
								AF_HEARTBEAT);
					esas2r_hdebug("heartbeat failed");
					esas2r_log(ESAS2R_LOG_CRIT,
						   "heartbeat failed");
					esas2r_bugon();
					esas2r_local_reset_adapter(a);
				}
			} else {
				esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT);
				a->heartbeat_time = currtime;
				esas2r_force_interrupt(a);
			}
		}
	}

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
761
762/*
763 * Send the specified task management function to the target and LUN
764 * specified in rqaux. in addition, immediately abort any commands that
765 * are queued but not sent to the device according to the rules specified
766 * by the task management function.
767 */
768bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
769 struct esas2r_request *rqaux, u8 task_mgt_func)
770{
771 u16 targetid = rqaux->target_id;
772 u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
773 bool ret = false;
774 struct esas2r_request *rq;
775 struct list_head *next, *element;
776 unsigned long flags;
777
778 LIST_HEAD(comp_list);
779
780 esas2r_trace_enter();
781 esas2r_trace("rqaux:%p", rqaux);
782 esas2r_trace("task_mgt_func:%x", task_mgt_func);
783 spin_lock_irqsave(&a->queue_lock, flags);
784
785 /* search the defer queue looking for requests for the device */
786 list_for_each_safe(element, next, &a->defer_list) {
787 rq = list_entry(element, struct esas2r_request, req_list);
788
789 if (rq->vrq->scsi.function == VDA_FUNC_SCSI
790 && rq->target_id == targetid
791 && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
792 || task_mgt_func == 0x20)) { /* target reset */
793 /* Found a request affected by the task management */
794 if (rq->req_stat == RS_PENDING) {
795 /*
796 * The request is pending or waiting. We can
797 * safelycomplete the request now.
798 */
799 if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
800 list_add_tail(&rq->comp_list,
801 &comp_list);
802 }
803 }
804 }
805
806 /* Send the task management request to the firmware */
807 rqaux->sense_len = 0;
808 rqaux->vrq->scsi.length = 0;
809 rqaux->target_id = targetid;
810 rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
811 memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
812 rqaux->vrq->scsi.flags |=
813 cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
814
815 if (a->flags & AF_FLASHING) {
816 /* Assume success. if there are active requests, return busy */
817 rqaux->req_stat = RS_SUCCESS;
818
819 list_for_each_safe(element, next, &a->active_list) {
820 rq = list_entry(element, struct esas2r_request,
821 req_list);
822 if (rq->vrq->scsi.function == VDA_FUNC_SCSI
823 && rq->target_id == targetid
824 && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
825 || task_mgt_func == 0x20)) /* target reset */
826 rqaux->req_stat = RS_BUSY;
827 }
828
829 ret = true;
830 }
831
832 spin_unlock_irqrestore(&a->queue_lock, flags);
833
834 if (!(a->flags & AF_FLASHING))
835 esas2r_start_request(a, rqaux);
836
837 esas2r_comp_list_drain(a, &comp_list);
838
839 if (atomic_read(&a->disable_cnt) == 0)
840 esas2r_do_deferred_processes(a);
841
842 esas2r_trace_exit();
843
844 return ret;
845}
846
847void esas2r_reset_bus(struct esas2r_adapter *a)
848{
849 esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
850
851 if (!(a->flags & AF_DEGRADED_MODE)
852 && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
853 esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED);
854 esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING);
855 esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
856
857 esas2r_schedule_tasklet(a);
858 }
859}
860
861bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
862 u8 status)
863{
864 esas2r_trace_enter();
865 esas2r_trace("rq:%p", rq);
866 list_del_init(&rq->req_list);
867 if (rq->timeout > RQ_MAX_TIMEOUT) {
868 /*
869 * The request timed out, but we could not abort it because a
870 * chip reset occurred. Return busy status.
871 */
872 rq->req_stat = RS_BUSY;
873 esas2r_trace_exit();
874 return true;
875 }
876
877 rq->req_stat = status;
878 esas2r_trace_exit();
879 return true;
880}
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
new file mode 100644
index 000000000000..f3d0cb885972
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -0,0 +1,2110 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_ioctl.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
46/*
47 * Buffered ioctl handlers. A buffered ioctl is one which requires that we
48 * allocate a DMA-able memory area to communicate with the firmware. In
49 * order to prevent continually allocating and freeing consistent memory,
50 * we will allocate a global buffer the first time we need it and re-use
51 * it for subsequent ioctl calls that require it.
52 */
53
/* CPU-visible address of the shared DMA-able ioctl buffer */
u8 *esas2r_buffered_ioctl;
/* bus/DMA address of the buffer above */
dma_addr_t esas2r_buffered_ioctl_addr;
/* current size of the allocated buffer, in bytes */
u32 esas2r_buffered_ioctl_size;
/* PCI device the buffer was allocated against */
struct pci_dev *esas2r_buffered_ioctl_pcid;
58
/* Serializes all buffered ioctls against the single global buffer. */
static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
/* Issues the request; a zero return means it completed immediately and
 * no wait is needed (see handle_buffered_ioctl()). */
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
				       struct esas2r_request *,
				       struct esas2r_sg_context *,
				       void *);
/* Optional hook invoked after the request completes. */
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
					     struct esas2r_request *, void *);

/* Describes one buffered ioctl invocation. */
struct esas2r_buffered_ioctl {
	struct esas2r_adapter *a;	/* adapter the ioctl targets */
	void *ioctl;			/* caller's ioctl payload */
	u32 length;			/* payload length in bytes */
	u32 control_code;		/* ioctl control code */
	u32 offset;			/* data offset within the buffer */
	BUFFERED_IOCTL_CALLBACK
		callback;		/* issues the request */
	void *context;			/* passed to callback */
	BUFFERED_IOCTL_DONE_CALLBACK
		done_callback;		/* optional completion hook */
	void *done_context;		/* passed to done_callback */

};
81
/* Completion callback: flag the FM API request done and wake the waiter
 * sleeping in do_fm_api(). */
static void complete_fm_api_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fm_api_command_done = 1;
	wake_up_interruptible(&a->fm_api_waiter);
}
88
89/* Callbacks for building scatter/gather lists for FM API requests */
90static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
91{
92 struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
93 int offset = sgc->cur_offset - a->save_offset;
94
95 (*addr) = a->firmware.phys + offset;
96 return a->firmware.orig_len - offset;
97}
98
99static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
100{
101 struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
102 int offset = sgc->cur_offset - a->save_offset;
103
104 (*addr) = a->firmware.header_buff_phys + offset;
105 return sizeof(struct esas2r_flash_img) - offset;
106}
107
108/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
109static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
110{
111 struct esas2r_request *rq;
112
113 if (down_interruptible(&a->fm_api_semaphore)) {
114 fi->status = FI_STAT_BUSY;
115 return;
116 }
117
118 rq = esas2r_alloc_request(a);
119 if (rq == NULL) {
120 up(&a->fm_api_semaphore);
121 fi->status = FI_STAT_BUSY;
122 return;
123 }
124
125 if (fi == &a->firmware.header) {
126 a->firmware.header_buff = dma_alloc_coherent(&a->pcid->dev,
127 (size_t)sizeof(
128 struct
129 esas2r_flash_img),
130 (dma_addr_t *)&a->
131 firmware.
132 header_buff_phys,
133 GFP_KERNEL);
134
135 if (a->firmware.header_buff == NULL) {
136 esas2r_debug("failed to allocate header buffer!");
137 fi->status = FI_STAT_BUSY;
138 return;
139 }
140
141 memcpy(a->firmware.header_buff, fi,
142 sizeof(struct esas2r_flash_img));
143 a->save_offset = a->firmware.header_buff;
144 a->fm_api_sgc.get_phys_addr =
145 (PGETPHYSADDR)get_physaddr_fm_api_header;
146 } else {
147 a->save_offset = (u8 *)fi;
148 a->fm_api_sgc.get_phys_addr =
149 (PGETPHYSADDR)get_physaddr_fm_api;
150 }
151
152 rq->comp_cb = complete_fm_api_req;
153 a->fm_api_command_done = 0;
154 a->fm_api_sgc.cur_offset = a->save_offset;
155
156 if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
157 &a->fm_api_sgc))
158 goto all_done;
159
160 /* Now wait around for it to complete. */
161 while (!a->fm_api_command_done)
162 wait_event_interruptible(a->fm_api_waiter,
163 a->fm_api_command_done);
164all_done:
165 if (fi == &a->firmware.header) {
166 memcpy(fi, a->firmware.header_buff,
167 sizeof(struct esas2r_flash_img));
168
169 dma_free_coherent(&a->pcid->dev,
170 (size_t)sizeof(struct esas2r_flash_img),
171 a->firmware.header_buff,
172 (dma_addr_t)a->firmware.header_buff_phys);
173 }
174
175 up(&a->fm_api_semaphore);
176 esas2r_free_request(a, (struct esas2r_request *)rq);
177 return;
178
179}
180
/* Completion callback: flag the NVRAM request done and wake its waiter. */
static void complete_nvr_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->nvram_command_done = 1;
	wake_up_interruptible(&a->nvram_waiter);
}
187
188/* Callback for building scatter/gather lists for buffered ioctls */
189static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
190 u64 *addr)
191{
192 int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;
193
194 (*addr) = esas2r_buffered_ioctl_addr + offset;
195 return esas2r_buffered_ioctl_size - offset;
196}
197
/* Completion callback: flag the buffered ioctl done and wake its waiter. */
static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
					struct esas2r_request *rq)
{
	a->buffered_ioctl_done = 1;
	wake_up_interruptible(&a->buffered_ioctl_waiter);
}
204
/*
 * Execute a buffered ioctl: copy the caller's data into the single,
 * driver-global DMA-coherent buffer, invoke the ioctl-specific callback
 * (which typically builds a firmware request whose SG list covers that
 * buffer), wait for completion, and copy the results back to the caller.
 *
 * A global semaphore serializes all users of the shared buffer, which is
 * grown lazily to the largest length ever requested.
 *
 * Returns IOCTL_SUCCESS or IOCTL_OUT_OF_RESOURCES.
 */
static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
	struct esas2r_adapter *a = bi->a;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	u8 result = IOCTL_SUCCESS;

	/* Serialize access to the shared buffered-ioctl buffer. */
	if (down_interruptible(&buffered_ioctl_semaphore))
		return IOCTL_OUT_OF_RESOURCES;

	/* allocate a buffer or use the existing buffer. */
	if (esas2r_buffered_ioctl) {
		if (esas2r_buffered_ioctl_size < bi->length) {
			/* free the too-small buffer and get a new one */
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);

			goto allocate_buffer;
		}
	} else {
allocate_buffer:
		/* (Re)allocate; reached via goto after freeing a too-small
		 * buffer, or directly when no buffer exists yet. */
		esas2r_buffered_ioctl_size = bi->length;
		esas2r_buffered_ioctl_pcid = a->pcid;
		esas2r_buffered_ioctl = dma_alloc_coherent(&a->pcid->dev,
							   (size_t)
							   esas2r_buffered_ioctl_size,
							   &
							   esas2r_buffered_ioctl_addr,
							   GFP_KERNEL);
	}

	if (!esas2r_buffered_ioctl) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate %d bytes of consistent memory "
			   "for a buffered ioctl!",
			   bi->length);

		esas2r_debug("buffered ioctl alloc failure");
		result = IOCTL_OUT_OF_RESOURCES;
		goto exit_cleanly;
	}

	/* Stage the caller's data into the shared DMA buffer. */
	memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate an internal request");

		result = IOCTL_OUT_OF_RESOURCES;
		esas2r_debug("buffered ioctl - no requests");
		goto exit_cleanly;
	}

	/* Set up the SG context so SG-list construction walks the shared
	 * buffer starting at the caller-supplied offset. */
	a->buffered_ioctl_done = 0;
	rq->comp_cb = complete_buffered_ioctl_req;
	sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
	sgc.length = esas2r_buffered_ioctl_size;

	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
		/* completed immediately, no need to wait */
		/* NOTE(review): this reset looks redundant — the flag was
		 * already cleared above and no request was started. */
		a->buffered_ioctl_done = 0;
		goto free_andexit_cleanly;
	}

	/* now wait around for it to complete. */
	while (!a->buffered_ioctl_done)
		wait_event_interruptible(a->buffered_ioctl_waiter,
					 a->buffered_ioctl_done);

free_andexit_cleanly:
	if (result == IOCTL_SUCCESS && bi->done_callback)
		(*bi->done_callback)(a, rq, bi->done_context);

	esas2r_free_request(a, rq);

exit_cleanly:
	/* Copy results back to the caller only on success. */
	if (result == IOCTL_SUCCESS)
		memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

	up(&buffered_ioctl_semaphore);
	return result;
}
291
/* SMP ioctl support */

/*
 * Buffered-ioctl callback for SMP passthrough: build a VDA SMP ioctl
 * request whose SG list covers the shared buffered-ioctl buffer and
 * start it.  Returns true when the request was started (caller waits
 * for completion), false when it failed immediately.
 */
static int smp_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc, void *context)
{
	/* The SMP frame lives at the start of the shared ioctl buffer. */
	struct atto_ioctl_smp *si =
		(struct atto_ioctl_smp *)esas2r_buffered_ioctl;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		/* Report the resource failure in the SMP status field. */
		si->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);
	return true;
}
311
312static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
313{
314 struct esas2r_buffered_ioctl bi;
315
316 memset(&bi, 0, sizeof(bi));
317
318 bi.a = a;
319 bi.ioctl = si;
320 bi.length = sizeof(struct atto_ioctl_smp)
321 + le32_to_cpu(si->req_length)
322 + le32_to_cpu(si->rsp_length);
323 bi.offset = 0;
324 bi.callback = smp_ioctl_callback;
325 return handle_buffered_ioctl(&bi);
326}
327
328
/* CSMI ioctl support */

/*
 * Completion callback for tunneled CSMI requests: copy the target ID and
 * LUN reported by the firmware back into the request, then chain to the
 * completion callback that was usurped by csmi_ioctl_tunnel().
 */
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

	/* Now call the original completion callback. */
	(*rq->aux_req_cb)(a, rq);
}
339
/* Tunnel a CSMI IOCTL to the back end driver for processing. */
/*
 * Builds a VDA CSMI ioctl request carrying @ctrl_code for @target_id,
 * attaches an SG list over the shared ioctl buffer, and starts it.
 * Returns true if the request was started, false on failure (degraded
 * adapter or SG-list construction failure).
 */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
			      union atto_ioctl_csmi *ci,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      u32 ctrl_code,
			      u16 target_id)
{
	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

	/* No firmware to tunnel to when the adapter is degraded. */
	if (a->flags & AF_DEGRADED_MODE)
		return false;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
	ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
	ioctl->csmi.target_id = cpu_to_le16(target_id);
	/* The LUN was previously folded into the SCSI flags by the caller. */
	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

	/*
	 * Always usurp the completion callback since the interrupt callback
	 * mechanism may be used.
	 */
	rq->aux_req_cx = ci;
	rq->aux_req_cb = rq->comp_cb;
	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

	if (!esas2r_build_sg_list(a, rq, sgc))
		return false;

	esas2r_start_request(a, rq);
	return true;
}
373
374static bool check_lun(struct scsi_lun lun)
375{
376 bool result;
377
378 result = ((lun.scsi_lun[7] == 0) &&
379 (lun.scsi_lun[6] == 0) &&
380 (lun.scsi_lun[5] == 0) &&
381 (lun.scsi_lun[4] == 0) &&
382 (lun.scsi_lun[3] == 0) &&
383 (lun.scsi_lun[2] == 0) &&
384/* Byte 1 is intentionally skipped */
385 (lun.scsi_lun[0] == 0));
386
387 return result;
388}
389
/*
 * Buffered-ioctl callback that dispatches a CSMI control code.  The CSMI
 * payload lives in the shared buffered-ioctl buffer.  Returns true when a
 * firmware request was started (the framework then waits for completion)
 * and false when the ioctl completed inline, with the CSMI status stored
 * in the request's response area.
 * NOTE(review): declared int but returns true/false like a bool.
 */
static int csmi_ioctl_callback(struct esas2r_adapter *a,
			       struct esas2r_request *rq,
			       struct esas2r_sg_context *sgc, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
	u8 path = 0;
	u8 tid = 0;
	u8 lun = 0;
	u32 sts = CSMI_STS_SUCCESS;
	struct esas2r_target *t;
	unsigned long flags;

	/* Only these two control codes carry device addressing info. */
	if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
		struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

		path = gda->path_id;
		tid = gda->target_id;
		lun = gda->lun;
	} else if (ci->control_code == CSMI_CC_TASK_MGT) {
		struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

		path = tm->path_id;
		tid = tm->target_id;
		lun = tm->lun;
	}

	/* Only path 0 exists and target IDs are bounded by the adapter. */
	if (path > 0 || tid > ESAS2R_MAX_ID) {
		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
			CSMI_STS_INV_PARAM);
		return false;
	}

	rq->target_id = tid;
	rq->vrq->scsi.flags |= cpu_to_le32(lun);

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		/* Driver identity; the done-callback fills in the rest. */
		struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

		strcpy(gdi->description, esas2r_get_model_name(a));
		gdi->csmi_major_rev = CSMI_MAJOR_REV;
		gdi->csmi_minor_rev = CSMI_MINOR_REV;
		break;
	}

	case CSMI_CC_GET_CNTLR_CFG:
	{
		/* Report PCI location, firmware/BIOS revisions and flags. */
		struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

		gcc->base_io_addr = 0;
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
				      &gcc->base_memaddr_lo);
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
				      &gcc->base_memaddr_hi);
		gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
					  a->pcid->subsystem_vendor);
		gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
		gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
		gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
		gcc->pci_addr.bus_num = a->pcid->bus->number;
		gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
		gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

		memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

		gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
		gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
		gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
		gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
		gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
		gcc->bios_build_rev = LOWORD(a->flash_ver);

		if (a->flags2 & AF2_THUNDERLINK)
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
					   | CSMI_CNTLRF_SATA_HBA;
		else
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
					   | CSMI_CNTLRF_SATA_RAID;

		gcc->rrom_major_rev = 0;
		gcc->rrom_minor_rev = 0;
		gcc->rrom_build_rev = 0;
		gcc->rrom_release_rev = 0;
		gcc->rrom_biosmajor_rev = 0;
		gcc->rrom_biosminor_rev = 0;
		gcc->rrom_biosbuild_rev = 0;
		gcc->rrom_biosrelease_rev = 0;
		break;
	}

	case CSMI_CC_GET_CNTLR_STS:
	{
		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

		if (a->flags & AF_DEGRADED_MODE)
			gcs->status = CSMI_CNTLR_STS_FAILED;
		else
			gcs->status = CSMI_CNTLR_STS_GOOD;

		gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
		break;
	}

	case CSMI_CC_FW_DOWNLOAD:
	case CSMI_CC_GET_RAID_INFO:
	case CSMI_CC_GET_RAID_CFG:

		/* Not implemented by this driver. */
		sts = CSMI_STS_BAD_CTRL_CODE;
		break;

	case CSMI_CC_SMP_PASSTHRU:
	case CSMI_CC_SSP_PASSTHRU:
	case CSMI_CC_STP_PASSTHRU:
	case CSMI_CC_GET_PHY_INFO:
	case CSMI_CC_SET_PHY_INFO:
	case CSMI_CC_GET_LINK_ERRORS:
	case CSMI_CC_GET_SATA_SIG:
	case CSMI_CC_GET_CONN_INFO:
	case CSMI_CC_PHY_CTRL:

		/* These are tunneled to the firmware with no target. */
		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       ESAS2R_TARG_ID_INV)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		/* Request started; framework waits for completion. */
		return true;

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		struct scsi_lun lun;

		memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

		if (!check_lun(lun)) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		/* make sure the device is present */
		spin_lock_irqsave(&a->mem_lock, flags);
		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
		spin_unlock_irqrestore(&a->mem_lock, flags);

		if (t == NULL) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		gsa->host_index = 0xFF;
		gsa->lun = gsa->sas_lun[1];
		rq->target_id = esas2r_targ_get_id(t, a);
		break;
	}

	case CSMI_CC_GET_DEV_ADDR:
	{
		struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || t->sas_addr == 0) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		/* fill in the result */
		*(u64 *)gda->sas_addr = t->sas_addr;
		memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
		break;
	}

	case CSMI_CC_TASK_MGT:

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || !(t->flags & TF_PASS_THRU)) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		/* Task management is tunneled to the specific target. */
		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       t->phys_targ_id)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	default:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;
	}

	/* Inline completion: record the CSMI status for the done-callback. */
	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

	return false;
}
604
605
/*
 * Buffered-ioctl done-callback for CSMI: patch driver identity into
 * GET_DRVR_INFO results, copy the resolved target back for
 * GET_SCSI_ADDR, and mirror the final CSMI status into the user's
 * atto_csmi structure.
 */
static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
				     struct esas2r_request *rq, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi =
			&ioctl_csmi->drvr_info;

		strcpy(gdi->name, ESAS2R_VERSION_STR);

		gdi->major_rev = ESAS2R_MAJOR_REV;
		gdi->minor_rev = ESAS2R_MINOR_REV;
		gdi->build_rev = 0;
		gdi->release_rev = 0;
		break;
	}

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		/* Only report an address if the lookup succeeded. */
		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
		    CSMI_STS_SUCCESS) {
			gsa->target_id = rq->target_id;
			gsa->path_id = 0;
		}

		break;
	}
	}

	/* Propagate the final status to the caller's structure. */
	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}
644
645
646static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
647{
648 struct esas2r_buffered_ioctl bi;
649
650 memset(&bi, 0, sizeof(bi));
651
652 bi.a = a;
653 bi.ioctl = &ci->data;
654 bi.length = sizeof(union atto_ioctl_csmi);
655 bi.offset = 0;
656 bi.callback = csmi_ioctl_callback;
657 bi.context = ci;
658 bi.done_callback = csmi_ioctl_done_callback;
659 bi.done_context = ci;
660
661 return handle_buffered_ioctl(&bi);
662}
663
664/* ATTO HBA ioctl support */
665
/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
/*
 * Builds a VDA HBA ioctl request with an SG list over the shared ioctl
 * buffer and starts it.  On SG-list failure the HBA status is set to
 * ATTO_STS_OUT_OF_RSRC and false is returned; true means the request
 * was started and the caller must wait for completion.
 */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
			     struct atto_ioctl *hi,
			     struct esas2r_request *rq,
			     struct esas2r_sg_context *sgc)
{
	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		hi->status = ATTO_STS_OUT_OF_RSRC;

		return false;
	}

	esas2r_start_request(a, rq);

	return true;
}
686
/*
 * Completion callback for SCSI passthrough requests: translate the
 * internal request status (RS_*) into an ATTO SPT status, copy SCSI
 * status/sense/residual into the passthrough structure, then chain to
 * the completion callback saved in aux_req_cb.
 */
static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
	struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
	/* Any RS_* value not mapped below is reported as FAILED. */
	u8 sts = ATTO_SPT_RS_FAILED;

	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
	spt->sense_length = rq->sense_len;
	spt->residual_length =
		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

	switch (rq->req_stat) {
	case RS_SUCCESS:
	/* A SCSI error still counts as a successfully executed command;
	 * the SCSI status field carries the detail. */
	case RS_SCSI_ERROR:
		sts = ATTO_SPT_RS_SUCCESS;
		break;
	case RS_UNDERRUN:
		sts = ATTO_SPT_RS_UNDERRUN;
		break;
	case RS_OVERRUN:
		sts = ATTO_SPT_RS_OVERRUN;
		break;
	case RS_SEL:
	case RS_SEL2:
		sts = ATTO_SPT_RS_NO_DEVICE;
		break;
	case RS_NO_LUN:
		sts = ATTO_SPT_RS_NO_LUN;
		break;
	case RS_TIMEOUT:
		sts = ATTO_SPT_RS_TIMEOUT;
		break;
	case RS_DEGRADED:
		sts = ATTO_SPT_RS_DEGRADED;
		break;
	case RS_BUSY:
		sts = ATTO_SPT_RS_BUSY;
		break;
	case RS_ABORTED:
		sts = ATTO_SPT_RS_ABORTED;
		break;
	case RS_RESET:
		sts = ATTO_SPT_RS_BUS_RESET;
		break;
	}

	spt->req_status = sts;

	/* Update the target ID to the next one present. */
	spt->target_id =
		esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

	/* Done, call the completion callback. */
	(*rq->aux_req_cb)(a, rq);
}
743
/*
 * Buffered-ioctl callback that dispatches an ATTO HBA ioctl function.
 * The atto_ioctl structure lives in the shared buffered-ioctl buffer.
 * Most functions complete inline (return false) with hi->status set;
 * tunneled and passthrough functions start a firmware request and
 * return true so the framework waits for completion.
 * NOTE(review): declared int but returns true/false like a bool.
 */
static int hba_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      void *context)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

	hi->status = ATTO_STS_SUCCESS;

	switch (hi->function) {
	case ATTO_FUNC_GET_ADAP_INFO:
	{
		/* Describe the adapter: PCI identity, link state, firmware
		 * revisions, topology limits and capability flags. */
		u8 *class_code = (u8 *)&a->pcid->class;

		struct atto_hba_get_adapter_info *gai =
			&hi->data.get_adap_info;
		int pcie_cap_reg;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_INFO0;
			break;
		}

		memset(gai, 0, sizeof(*gai));

		gai->pci.vendor_id = a->pcid->vendor;
		gai->pci.device_id = a->pcid->device;
		gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
		gai->pci.ss_device_id = a->pcid->subsystem_device;
		gai->pci.class_code[0] = class_code[0];
		gai->pci.class_code[1] = class_code[1];
		gai->pci.class_code[2] = class_code[2];
		gai->pci.rev_id = a->pcid->revision;
		gai->pci.bus_num = a->pcid->bus->number;
		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

		/* Report PCIe link speed/width if the device has the
		 * PCI Express capability. */
		pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
		if (pcie_cap_reg) {
			u16 stat;
			u32 caps;

			pci_read_config_word(a->pcid,
					     pcie_cap_reg + PCI_EXP_LNKSTA,
					     &stat);
			pci_read_config_dword(a->pcid,
					      pcie_cap_reg + PCI_EXP_LNKCAP,
					      &caps);

			gai->pci.link_speed_curr =
				(u8)(stat & PCI_EXP_LNKSTA_CLS);
			gai->pci.link_speed_max =
				(u8)(caps & PCI_EXP_LNKCAP_SLS);
			gai->pci.link_width_curr =
				(u8)((stat & PCI_EXP_LNKSTA_NLW)
				     >> PCI_EXP_LNKSTA_NLW_SHIFT);
			gai->pci.link_width_max =
				(u8)((caps & PCI_EXP_LNKCAP_MLW)
				     >> 4);
		}

		gai->pci.msi_vector_cnt = 1;

		if (a->pcid->msix_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
		else if (a->pcid->msi_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
		else
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

		gai->adap_type = ATTO_GAI_AT_ESASRAID2;

		if (a->flags2 & AF2_THUNDERLINK)
			gai->adap_type = ATTO_GAI_AT_TLSASHBA;

		if (a->flags & AF_DEGRADED_MODE)
			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;

		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
				   ATTO_GAI_AF_DEVADDR_SUPP;

		/* These subsystem IDs advertise a virtual SES device. */
		if (a->pcid->subsystem_device == ATTO_ESAS_R60F
		    || a->pcid->subsystem_device == ATTO_ESAS_R608
		    || a->pcid->subsystem_device == ATTO_ESAS_R644
		    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
			gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;

		gai->num_ports = ESAS2R_NUM_PHYS;
		gai->num_phys = ESAS2R_NUM_PHYS;

		strcpy(gai->firmware_rev, a->fw_rev);
		strcpy(gai->flash_rev, a->flash_rev);
		strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
		strcpy(gai->model_name, esas2r_get_model_name(a));

		gai->num_targets = ESAS2R_MAX_TARGETS;

		gai->num_busses = 1;
		gai->num_targsper_bus = gai->num_targets;
		gai->num_lunsper_targ = 256;

		if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
		    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
			gai->num_connectors = 4;
		else
			gai->num_connectors = 2;

		gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;

		gai->num_targets_backend = a->num_targets_backend;

		/* Report which functions may be tunneled to the back end. */
		gai->tunnel_flags = a->ioctl_tunnel
				    & (ATTO_GAI_TF_MEM_RW
				       | ATTO_GAI_TF_TRACE
				       | ATTO_GAI_TF_SCSI_PASS_THRU
				       | ATTO_GAI_TF_GET_DEV_ADDR
				       | ATTO_GAI_TF_PHY_CTRL
				       | ATTO_GAI_TF_CONN_CTRL
				       | ATTO_GAI_TF_GET_DEV_INFO);
		break;
	}

	case ATTO_FUNC_GET_ADAP_ADDR:
	{
		/* Return the adapter SAS address for port/node queries. */
		struct atto_hba_get_adapter_address *gaa =
			&hi->data.get_adap_addr;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_ADDR0;
		} else if (gaa->addr_type == ATTO_GAA_AT_PORT
			   || gaa->addr_type == ATTO_GAA_AT_NODE) {
			if (gaa->addr_type == ATTO_GAA_AT_PORT
			    && gaa->port_id >= ESAS2R_NUM_PHYS) {
				hi->status = ATTO_STS_NOT_APPL;
			} else {
				memcpy((u64 *)gaa->address,
				       &a->nvram->sas_addr[0], sizeof(u64));
				gaa->addr_len = sizeof(u64);
			}
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		break;
	}

	case ATTO_FUNC_MEM_RW:
	{
		/* Only supported via the tunnel to the back end. */
		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;

		break;
	}

	case ATTO_FUNC_TRACE:
	{
		/* Firmware core-dump upload/reset/info. */
		struct atto_hba_trace *trc = &hi->data.trace;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_TRACE1) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_TRACE1;
			break;
		}

		if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
		    && hi->version >= ATTO_VER_TRACE1) {
			if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
				u32 len = hi->data_length;
				u32 offset = trc->current_offset;
				u32 total_len = ESAS2R_FWCOREDUMP_SZ;

				/* Size is zero if a core dump isn't present */
				if (!(a->flags2 & AF2_COREDUMP_SAVED))
					total_len = 0;

				if (len > total_len)
					len = total_len;

				/* Reject reads outside the dump buffer. */
				if (offset >= total_len
				    || offset + len > total_len
				    || len == 0) {
					hi->status = ATTO_STS_INV_PARAM;
					break;
				}

				/* Dump data follows the trace header. */
				memcpy(trc + 1,
				       a->fw_coredump_buff + offset,
				       len);

				hi->data_length = len;
			} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
				memset(a->fw_coredump_buff, 0,
				       ESAS2R_FWCOREDUMP_SZ);

				esas2r_lock_clear_flags(&a->flags2,
							AF2_COREDUMP_SAVED);
			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
				hi->status = ATTO_STS_UNSUPPORTED;
				break;
			}

			/* Always return all the info we can. */
			trc->trace_mask = 0;
			trc->current_offset = 0;
			trc->total_length = ESAS2R_FWCOREDUMP_SZ;

			/* Return zero length buffer if core dump not present */
			if (!(a->flags2 & AF2_COREDUMP_SAVED))
				trc->total_length = 0;
		} else {
			hi->status = ATTO_STS_UNSUPPORTED;
		}

		break;
	}

	case ATTO_FUNC_SCSI_PASS_THRU:
	{
		/* Build and start a SCSI command on behalf of the caller;
		 * data (if any) follows the passthrough header in the
		 * shared buffer. */
		struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
		struct scsi_lun lun;

		memcpy(&lun, spt->lun, sizeof(struct scsi_lun));

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_SCSI_PASS_THRU0;
			break;
		}

		if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		esas2r_sgc_init(sgc, a, rq, NULL);

		sgc->length = hi->data_length;
		sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
				   + sizeof(struct atto_hba_scsi_pass_thru);

		/* Finish request initialization */
		rq->target_id = (u16)spt->target_id;
		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
		rq->sense_len = spt->sense_length;
		rq->sense_buf = (u8 *)spt->sense_data;
		/* NOTE: we ignore spt->timeout */

		/*
		 * always usurp the completion callback since the interrupt
		 * callback mechanism may be used.
		 */

		rq->aux_req_cx = hi;
		rq->aux_req_cb = rq->comp_cb;
		rq->comp_cb = scsi_passthru_comp_cb;

		if (spt->flags & ATTO_SPTF_DATA_IN) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
		} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		} else {
			/* No transfer direction: reject a nonzero length. */
			if (sgc->length) {
				hi->status = ATTO_STS_INV_PARAM;
				break;
			}
		}

		if (spt->flags & ATTO_SPTF_ORDERED_Q)
			rq->vrq->scsi.flags |=
				cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);

		if (!esas2r_build_sg_list(a, rq, sgc)) {
			hi->status = ATTO_STS_OUT_OF_RSRC;
			break;
		}

		esas2r_start_request(a, rq);

		return true;
	}

	case ATTO_FUNC_GET_DEV_ADDR:
	{
		/* Return the SAS address of one target and advance the
		 * caller's target cursor. */
		struct atto_hba_get_device_address *gda =
			&hi->data.get_dev_addr;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_ADDR0;
			break;
		}

		if (gda->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gda->target_id;

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
		} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
			if (t->sas_addr == 0) {
				hi->status = ATTO_STS_UNSUPPORTED;
			} else {
				*(u64 *)gda->address = t->sas_addr;

				gda->addr_len = sizeof(u64);
			}
		} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
			hi->status = ATTO_STS_NOT_APPL;
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		/* update the target ID to the next one present. */

		gda->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gda->target_id);
		break;
	}

	case ATTO_FUNC_PHY_CTRL:
	case ATTO_FUNC_CONN_CTRL:
	{
		/* Always tunneled to the back end. */
		if (hba_ioctl_tunnel(a, hi, rq, sgc))
			return true;

		break;
	}

	case ATTO_FUNC_ADAP_CTRL:
	{
		/* Reset the adapter or report its current state. */
		struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_ADAP_CTRL0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_ADAP_CTRL0;
			break;
		}

		if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
			esas2r_reset_adapter(a);
		} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		/* Report the state regardless of which function ran. */
		if (a->flags & AF_CHPRST_NEEDED)
			ac->adap_state = ATTO_AC_AS_RST_SCHED;
		else if (a->flags & AF_CHPRST_PENDING)
			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
		else if (a->flags & AF_DISC_PENDING)
			ac->adap_state = ATTO_AC_AS_RST_DISC;
		else if (a->flags & AF_DISABLED)
			ac->adap_state = ATTO_AC_AS_DISABLED;
		else if (a->flags & AF_DEGRADED_MODE)
			ac->adap_state = ATTO_AC_AS_DEGRADED;
		else
			ac->adap_state = ATTO_AC_AS_OK;

		break;
	}

	case ATTO_FUNC_GET_DEV_INFO:
	{
		/* Local handling only advances the target cursor; detailed
		 * info requires the tunnel. */
		struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_INFO0;
			break;
		}

		if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gdi->target_id;

		/* update the target ID to the next one present. */

		gdi->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gdi->target_id);

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;
		break;
	}

	default:

		hi->status = ATTO_STS_INV_FUNC;
		break;
	}

	return false;
}
1206
/*
 * Buffered-ioctl done-callback for ATTO HBA ioctls: patch the driver's
 * identity and topology fields into GET_ADAP_INFO results after the
 * request (local or tunneled) has completed.
 */
static void hba_ioctl_done_callback(struct esas2r_adapter *a,
				    struct esas2r_request *rq, void *context)
{
	struct atto_ioctl *ioctl_hba =
		(struct atto_ioctl *)esas2r_buffered_ioctl;

	esas2r_debug("hba_ioctl_done_callback %d", a->index);

	if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
		struct atto_hba_get_adapter_info *gai =
			&ioctl_hba->data.get_adap_info;

		esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");

		gai->drvr_rev_major = ESAS2R_MAJOR_REV;
		gai->drvr_rev_minor = ESAS2R_MINOR_REV;

		strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
		strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);

		gai->num_busses = 1;
		gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
		gai->num_lunsper_targ = 1;
	}
}
1232
1233u8 handle_hba_ioctl(struct esas2r_adapter *a,
1234 struct atto_ioctl *ioctl_hba)
1235{
1236 struct esas2r_buffered_ioctl bi;
1237
1238 memset(&bi, 0, sizeof(bi));
1239
1240 bi.a = a;
1241 bi.ioctl = ioctl_hba;
1242 bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
1243 bi.callback = hba_ioctl_callback;
1244 bi.context = NULL;
1245 bi.done_callback = hba_ioctl_done_callback;
1246 bi.done_context = NULL;
1247 bi.offset = 0;
1248
1249 return handle_buffered_ioctl(&bi);
1250}
1251
1252
1253int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
1254 struct esas2r_sas_nvram *data)
1255{
1256 int result = 0;
1257
1258 a->nvram_command_done = 0;
1259 rq->comp_cb = complete_nvr_req;
1260
1261 if (esas2r_nvram_write(a, rq, data)) {
1262 /* now wait around for it to complete. */
1263 while (!a->nvram_command_done)
1264 wait_event_interruptible(a->nvram_waiter,
1265 a->nvram_command_done);
1266 ;
1267
1268 /* done, check the status. */
1269 if (rq->req_stat == RS_SUCCESS)
1270 result = 1;
1271 }
1272 return result;
1273}
1274
1275
1276/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
1277int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
1278{
1279 struct atto_express_ioctl *ioctl = NULL;
1280 struct esas2r_adapter *a;
1281 struct esas2r_request *rq;
1282 u16 code;
1283 int err;
1284
1285 esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);
1286
1287 if ((arg == NULL)
1288 || (cmd < EXPRESS_IOCTL_MIN)
1289 || (cmd > EXPRESS_IOCTL_MAX))
1290 return -ENOTSUPP;
1291
1292 if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
1293 esas2r_log(ESAS2R_LOG_WARN,
1294 "ioctl_handler access_ok failed for cmd %d, "
1295 "address %p", cmd,
1296 arg);
1297 return -EFAULT;
1298 }
1299
1300 /* allocate a kernel memory buffer for the IOCTL data */
1301 ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
1302 if (ioctl == NULL) {
1303 esas2r_log(ESAS2R_LOG_WARN,
1304 "ioctl_handler kzalloc failed for %d bytes",
1305 sizeof(struct atto_express_ioctl));
1306 return -ENOMEM;
1307 }
1308
1309 err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
1310 if (err != 0) {
1311 esas2r_log(ESAS2R_LOG_WARN,
1312 "copy_from_user didn't copy everything (err %d, cmd %d)",
1313 err,
1314 cmd);
1315 kfree(ioctl);
1316
1317 return -EFAULT;
1318 }
1319
1320 /* verify the signature */
1321
1322 if (memcmp(ioctl->header.signature,
1323 EXPRESS_IOCTL_SIGNATURE,
1324 EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
1325 esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
1326 kfree(ioctl);
1327
1328 return -ENOTSUPP;
1329 }
1330
1331 /* assume success */
1332
1333 ioctl->header.return_code = IOCTL_SUCCESS;
1334 err = 0;
1335
1336 /*
1337 * handle EXPRESS_IOCTL_GET_CHANNELS
1338 * without paying attention to channel
1339 */
1340
1341 if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
1342 int i = 0, k = 0;
1343
1344 ioctl->data.chanlist.num_channels = 0;
1345
1346 while (i < MAX_ADAPTERS) {
1347 if (esas2r_adapters[i]) {
1348 ioctl->data.chanlist.num_channels++;
1349 ioctl->data.chanlist.channel[k] = i;
1350 k++;
1351 }
1352 i++;
1353 }
1354
1355 goto ioctl_done;
1356 }
1357
1358 /* get the channel */
1359
1360 if (ioctl->header.channel == 0xFF) {
1361 a = (struct esas2r_adapter *)hostdata;
1362 } else {
1363 a = esas2r_adapters[ioctl->header.channel];
1364 if (ioctl->header.channel >= MAX_ADAPTERS || (a == NULL)) {
1365 ioctl->header.return_code = IOCTL_BAD_CHANNEL;
1366 esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
1367 kfree(ioctl);
1368
1369 return -ENOTSUPP;
1370 }
1371 }
1372
1373 switch (cmd) {
1374 case EXPRESS_IOCTL_RW_FIRMWARE:
1375
1376 if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
1377 err = esas2r_write_fw(a,
1378 (char *)ioctl->data.fwrw.image,
1379 0,
1380 sizeof(struct
1381 atto_express_ioctl));
1382
1383 if (err >= 0) {
1384 err = esas2r_read_fw(a,
1385 (char *)ioctl->data.fwrw.
1386 image,
1387 0,
1388 sizeof(struct
1389 atto_express_ioctl));
1390 }
1391 } else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
1392 err = esas2r_write_fs(a,
1393 (char *)ioctl->data.fwrw.image,
1394 0,
1395 sizeof(struct
1396 atto_express_ioctl));
1397
1398 if (err >= 0) {
1399 err = esas2r_read_fs(a,
1400 (char *)ioctl->data.fwrw.
1401 image,
1402 0,
1403 sizeof(struct
1404 atto_express_ioctl));
1405 }
1406 } else {
1407 ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
1408 }
1409
1410 break;
1411
1412 case EXPRESS_IOCTL_READ_PARAMS:
1413
1414 memcpy(ioctl->data.prw.data_buffer, a->nvram,
1415 sizeof(struct esas2r_sas_nvram));
1416 ioctl->data.prw.code = 1;
1417 break;
1418
1419 case EXPRESS_IOCTL_WRITE_PARAMS:
1420
1421 rq = esas2r_alloc_request(a);
1422 if (rq == NULL) {
1423 up(&a->nvram_semaphore);
1424 ioctl->data.prw.code = 0;
1425 break;
1426 }
1427
1428 code = esas2r_write_params(a, rq,
1429 (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
1430 ioctl->data.prw.code = code;
1431
1432 esas2r_free_request(a, rq);
1433
1434 break;
1435
1436 case EXPRESS_IOCTL_DEFAULT_PARAMS:
1437
1438 esas2r_nvram_get_defaults(a,
1439 (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
1440 ioctl->data.prw.code = 1;
1441 break;
1442
1443 case EXPRESS_IOCTL_CHAN_INFO:
1444
1445 ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
1446 ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
1447 ioctl->data.chaninfo.IRQ = a->pcid->irq;
1448 ioctl->data.chaninfo.device_id = a->pcid->device;
1449 ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
1450 ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
1451 ioctl->data.chaninfo.revision_id = a->pcid->revision;
1452 ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
1453 ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
1454 ioctl->data.chaninfo.core_rev = 0;
1455 ioctl->data.chaninfo.host_no = a->host->host_no;
1456 ioctl->data.chaninfo.hbaapi_rev = 0;
1457 break;
1458
1459 case EXPRESS_IOCTL_SMP:
1460 ioctl->header.return_code = handle_smp_ioctl(a,
1461 &ioctl->data.
1462 ioctl_smp);
1463 break;
1464
1465 case EXPRESS_CSMI:
1466 ioctl->header.return_code =
1467 handle_csmi_ioctl(a, &ioctl->data.csmi);
1468 break;
1469
1470 case EXPRESS_IOCTL_HBA:
1471 ioctl->header.return_code = handle_hba_ioctl(a,
1472 &ioctl->data.
1473 ioctl_hba);
1474 break;
1475
1476 case EXPRESS_IOCTL_VDA:
1477 err = esas2r_write_vda(a,
1478 (char *)&ioctl->data.ioctl_vda,
1479 0,
1480 sizeof(struct atto_ioctl_vda) +
1481 ioctl->data.ioctl_vda.data_length);
1482
1483 if (err >= 0) {
1484 err = esas2r_read_vda(a,
1485 (char *)&ioctl->data.ioctl_vda,
1486 0,
1487 sizeof(struct atto_ioctl_vda) +
1488 ioctl->data.ioctl_vda.data_length);
1489 }
1490
1491
1492
1493
1494 break;
1495
1496 case EXPRESS_IOCTL_GET_MOD_INFO:
1497
1498 ioctl->data.modinfo.adapter = a;
1499 ioctl->data.modinfo.pci_dev = a->pcid;
1500 ioctl->data.modinfo.scsi_host = a->host;
1501 ioctl->data.modinfo.host_no = a->host->host_no;
1502
1503 break;
1504
1505 default:
1506 esas2r_debug("esas2r_ioctl invalid cmd %p!", cmd);
1507 ioctl->header.return_code = IOCTL_ERR_INVCMD;
1508 }
1509
1510ioctl_done:
1511
1512 if (err < 0) {
1513 esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
1514 cmd);
1515
1516 switch (err) {
1517 case -ENOMEM:
1518 case -EBUSY:
1519 ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
1520 break;
1521
1522 case -ENOSYS:
1523 case -EINVAL:
1524 ioctl->header.return_code = IOCTL_INVALID_PARAM;
1525 break;
1526 }
1527
1528 ioctl->header.return_code = IOCTL_GENERAL_ERROR;
1529 }
1530
1531 /* Always copy the buffer back, if only to pick up the status */
1532 err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
1533 if (err != 0) {
1534 esas2r_log(ESAS2R_LOG_WARN,
1535 "ioctl_handler copy_to_user didn't copy "
1536 "everything (err %d, cmd %d)", err,
1537 cmd);
1538 kfree(ioctl);
1539
1540 return -EFAULT;
1541 }
1542
1543 kfree(ioctl);
1544
1545 return 0;
1546}
1547
1548int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
1549{
1550 return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
1551}
1552
1553static void free_fw_buffers(struct esas2r_adapter *a)
1554{
1555 if (a->firmware.data) {
1556 dma_free_coherent(&a->pcid->dev,
1557 (size_t)a->firmware.orig_len,
1558 a->firmware.data,
1559 (dma_addr_t)a->firmware.phys);
1560
1561 a->firmware.data = NULL;
1562 }
1563}
1564
1565static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
1566{
1567 free_fw_buffers(a);
1568
1569 a->firmware.orig_len = length;
1570
1571 a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
1572 (size_t)length,
1573 (dma_addr_t *)&a->firmware.
1574 phys,
1575 GFP_KERNEL);
1576
1577 if (!a->firmware.data) {
1578 esas2r_debug("buffer alloc failed!");
1579 return 0;
1580 }
1581
1582 return 1;
1583}
1584
/*
 * Handle a call to read firmware (FM API).  Behavior depends on the
 * state cached by a prior esas2r_write_fw():
 *
 *   FW_STATUS_ST  - a completed command's header is cached; copy it to
 *                   the caller and return its size.
 *   FW_COMMAND_ST - an upload command is pending; at offset 0 issue it
 *                   via do_fm_api(), then stream the image back chunk
 *                   by chunk on subsequent calls, freeing the DMA
 *                   buffer once the final chunk has been consumed.
 *
 * Returns the number of bytes copied to @buf, or a negative errno.
 */
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
{
	esas2r_trace_enter();
	/* if the cached header is a status, simply copy it over and return. */
	if (a->firmware.state == FW_STATUS_ST) {
		int size = min_t(int, count, sizeof(a->firmware.header));
		esas2r_trace_exit();
		memcpy(buf, &a->firmware.header, size);
		esas2r_debug("esas2r_read_fw: STATUS size %d", size);
		return size;
	}

	/*
	 * if the cached header is a command, do it if at
	 * offset 0, otherwise copy the pieces.
	 */

	if (a->firmware.state == FW_COMMAND_ST) {
		u32 length = a->firmware.header.length;
		esas2r_trace_exit();

		esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
			     length,
			     off);

		if (off == 0) {
			if (a->firmware.header.action == FI_ACT_UP) {
				/* full upload: stage the image in a DMA
				 * buffer, then run the flash command */
				if (!allocate_fw_buffers(a, length))
					return -ENOMEM;


				/* copy header over */

				memcpy(a->firmware.data,
				       &a->firmware.header,
				       sizeof(a->firmware.header));

				do_fm_api(a,
					  (struct esas2r_flash_img *)a->firmware.data);
			} else if (a->firmware.header.action == FI_ACT_UPSZ) {
				/* size query: run the command on the cached
				 * header alone and hand the result back now */
				int size =
					min((int)count,
					    (int)sizeof(a->firmware.header));
				do_fm_api(a, &a->firmware.header);
				memcpy(buf, &a->firmware.header, size);
				esas2r_debug("FI_ACT_UPSZ size %d", size);
				return size;
			} else {
				esas2r_debug("invalid action %d",
					     a->firmware.header.action);
				return -ENOSYS;
			}
		}

		/* clamp the read so it does not run past the image */
		if (count + off > length)
			count = length - off;

		if (count < 0)
			return 0;

		if (!a->firmware.data) {
			esas2r_debug(
				"read: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off,
			     count,
			     length);

		memcpy(buf, &a->firmware.data[off], count);

		/* when done, release the buffer */

		if (length <= off + count) {
			esas2r_debug("esas2r_read_fw: freeing buffer!");

			free_fw_buffers(a);
		}

		return count;
	}

	esas2r_trace_exit();
	esas2r_debug("esas2r_read_fw: invalid firmware state %d",
		     a->firmware.state);

	return -EINVAL;
}
1675
/*
 * Handle a call to write firmware (FM API).  At offset 0 the buffer must
 * start with a struct esas2r_flash_img header, which is validated and
 * cached.  For FI_ACT_DOWN the image payload is accumulated into a DMA
 * buffer across successive calls and the flash command runs once the
 * final chunk arrives; for FI_ACT_UP/FI_ACT_UPSZ only the header is
 * cached and the command is performed by the following esas2r_read_fw().
 *
 * Returns the number of bytes consumed from @buf, or a negative errno.
 */
int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	u32 length;

	if (off == 0) {
		struct esas2r_flash_img *header =
			(struct esas2r_flash_img *)buf;

		/* assume version 0 flash image */

		int min_size = sizeof(struct esas2r_flash_img_v0);

		a->firmware.state = FW_INVALID_ST;

		/* validate the version field first */

		if (count < 4
		    || header->fi_version > FI_VERSION_1) {
			esas2r_debug(
				"esas2r_write_fw: short header or invalid version");
			return -EINVAL;
		}

		/* See if its a version 1 flash image */

		if (header->fi_version == FI_VERSION_1)
			min_size = sizeof(struct esas2r_flash_img);

		/* If this is the start, the header must be full and valid. */
		if (count < min_size) {
			esas2r_debug("esas2r_write_fw: short header, aborting");
			return -EINVAL;
		}

		/* Make sure the size is reasonable. */
		length = header->length;

		/* cap the image at 1 MiB to bound the DMA allocation */
		if (length > 1024 * 1024) {
			esas2r_debug(
				"esas2r_write_fw: hosed, length %d fi_version %d",
				length, header->fi_version);
			return -EINVAL;
		}

		/*
		 * If this is a write command, allocate memory because
		 * we have to cache everything. otherwise, just cache
		 * the header, because the read op will do the command.
		 */

		if (header->action == FI_ACT_DOWN) {
			if (!allocate_fw_buffers(a, length))
				return -ENOMEM;

			/*
			 * Store the command, so there is context on subsequent
			 * calls.
			 */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));
		} else if (header->action == FI_ACT_UP
			   || header->action == FI_ACT_UPSZ) {
			/* Save the command, result will be picked up on read */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));

			a->firmware.state = FW_COMMAND_ST;

			esas2r_debug(
				"esas2r_write_fw: COMMAND, count %d, action %d ",
				count, header->action);

			/*
			 * Pretend we took the whole buffer,
			 * so we don't get bothered again.
			 */

			return count;
		} else {
			esas2r_debug("esas2r_write_fw: invalid action %d ",
				     a->firmware.header.action);
			return -ENOSYS;
		}
	} else {
		/* continuation of a download: total size comes from the
		 * header cached on the first (offset 0) call */
		length = a->firmware.header.length;
	}

	/*
	 * We only get here on a download command, regardless of offset.
	 * the chunks written by the system need to be cached, and when
	 * the final one arrives, issue the fmapi command.
	 */

	if (off + count > length)
		count = length - off;

	if (count > 0) {
		esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
			     count,
			     length);

		/*
		 * On a full upload, the system tries sending the whole buffer.
		 * there's nothing to do with it, so just drop it here, before
		 * trying to copy over into unallocated memory!
		 */
		if (a->firmware.header.action == FI_ACT_UP)
			return count;

		if (!a->firmware.data) {
			esas2r_debug(
				"write: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		memcpy(&a->firmware.data[off], buf, count);

		if (length == off + count) {
			/* final chunk received: run the flash command now */
			do_fm_api(a,
				  (struct esas2r_flash_img *)a->firmware.data);

			/*
			 * Now copy the header result to be picked up by the
			 * next read
			 */
			memcpy(&a->firmware.header,
			       a->firmware.data,
			       sizeof(a->firmware.header));

			a->firmware.state = FW_STATUS_ST;

			esas2r_debug("write completed");

			/*
			 * Since the system has the data buffered, the only way
			 * this can leak is if a root user writes a program
			 * that writes a shorter buffer than it claims, and the
			 * copyin fails.
			 */
			free_fw_buffers(a);
		}
	}

	return count;
}
1825
1826/* Callback for the completion of a VDA request. */
1827static void vda_complete_req(struct esas2r_adapter *a,
1828 struct esas2r_request *rq)
1829{
1830 a->vda_command_done = 1;
1831 wake_up_interruptible(&a->vda_waiter);
1832}
1833
1834/* Scatter/gather callback for VDA requests */
1835static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
1836{
1837 struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
1838 int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;
1839
1840 (*addr) = a->ppvda_buffer + offset;
1841 return VDA_MAX_BUFFER_SIZE - offset;
1842}
1843
/*
 * Handle a call to read a VDA command.  At offset 0 the command that was
 * previously staged in a->vda_buffer by esas2r_write_vda() is issued to
 * the chip (sleeping until it completes); the response is then copied
 * out of the buffer, in chunks, on this and subsequent calls.
 *
 * Returns the number of bytes copied to @buf, or a negative errno.
 */
int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->vda_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct atto_ioctl_vda *vi =
			(struct atto_ioctl_vda *)a->vda_buffer;
		struct esas2r_sg_context sgc;
		bool wait_for_completion;

		/*
		 * Presumably, someone has already written the command to the
		 * vda_buffer and is now reading back the response, so issue
		 * the request to the chip here and reply.
		 */

		/* allocate a request */
		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_vda: out of requestss");
			return -EBUSY;
		}

		rq->comp_cb = vda_complete_req;

		/* s/g entries resolve into the VDA DMA buffer itself,
		 * past its header */
		sgc.first_req = rq;
		sgc.adapter = a;
		sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;

		a->vda_command_done = 0;

		wait_for_completion =
			esas2r_process_vda_ioctl(a, vi, rq, &sgc);

		if (wait_for_completion) {
			/* now wait around for it to complete. */

			while (!a->vda_command_done)
				wait_event_interruptible(a->vda_waiter,
							 a->vda_command_done);
		}

		esas2r_free_request(a, (struct esas2r_request *)rq);
	}

	/* copy (part of) the response back to the caller */
	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->vda_buffer + off, count);

	return count;
}
1906
1907/* Handle a call to write a VDA command. */
1908int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
1909 int count)
1910{
1911 /*
1912 * allocate memory for it, if not already done. once allocated,
1913 * we will keep it around until the driver is unloaded.
1914 */
1915
1916 if (!a->vda_buffer) {
1917 dma_addr_t dma_addr;
1918 a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
1919 (size_t)
1920 VDA_MAX_BUFFER_SIZE,
1921 &dma_addr,
1922 GFP_KERNEL);
1923
1924 a->ppvda_buffer = dma_addr;
1925 }
1926
1927 if (!a->vda_buffer)
1928 return -ENOMEM;
1929
1930 if (off > VDA_MAX_BUFFER_SIZE)
1931 return 0;
1932
1933 if (count + off > VDA_MAX_BUFFER_SIZE)
1934 count = VDA_MAX_BUFFER_SIZE - off;
1935
1936 if (count < 1)
1937 return 0;
1938
1939 memcpy(a->vda_buffer + off, buf, count);
1940
1941 return count;
1942}
1943
1944/* Callback for the completion of an FS_API request.*/
1945static void fs_api_complete_req(struct esas2r_adapter *a,
1946 struct esas2r_request *rq)
1947{
1948 a->fs_api_command_done = 1;
1949
1950 wake_up_interruptible(&a->fs_api_waiter);
1951}
1952
1953/* Scatter/gather callback for VDA requests */
1954static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
1955{
1956 struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
1957 struct esas2r_ioctl_fs *fs =
1958 (struct esas2r_ioctl_fs *)a->fs_api_buffer;
1959 u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
1960
1961 (*addr) = a->ppfs_api_buffer + offset;
1962
1963 return a->fs_api_buffer_size - offset;
1964}
1965
/*
 * Handle a call to read firmware via FS_API.  At offset 0 the command
 * previously staged in a->fs_api_buffer by esas2r_write_fs() is issued
 * to the chip (serialized by fs_api_semaphore, sleeping until it
 * completes); the response is then copied out of the buffer, in chunks,
 * on this and subsequent calls.
 *
 * Returns the number of bytes copied to @buf, 0 for reads past the end
 * of the buffer, or a negative errno.
 */
int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->fs_api_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct esas2r_sg_context sgc;
		struct esas2r_ioctl_fs *fs =
			(struct esas2r_ioctl_fs *)a->fs_api_buffer;

		/* If another flash request is already in progress, return. */
		if (down_interruptible(&a->fs_api_semaphore)) {
busy:
			/* also reached via goto from the request-allocation
			 * failure below; report busy through the buffer too */
			fs->status = ATTO_STS_OUT_OF_RSRC;
			return -EBUSY;
		}

		/*
		 * Presumably, someone has already written the command to the
		 * fs_api_buffer and is now reading back the response, so
		 * actually issue the request to the chip here and reply.
		 * Allocate a request.
		 */

		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_fs: out of requests");
			up(&a->fs_api_semaphore);
			goto busy;
		}

		rq->comp_cb = fs_api_complete_req;

		/* Set up the SGCONTEXT for to build the s/g table */

		sgc.cur_offset = fs->data;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;

		a->fs_api_command_done = 0;

		if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
			/* request was not issued; pass the resource error
			 * up through the return value as well */
			if (fs->status == ATTO_STS_OUT_OF_RSRC)
				count = -EBUSY;

			goto dont_wait;
		}

		/* Now wait around for it to complete. */

		while (!a->fs_api_command_done)
			wait_event_interruptible(a->fs_api_waiter,
						 a->fs_api_command_done);
		;
dont_wait:
		/* Free the request and keep going */
		up(&a->fs_api_semaphore);
		esas2r_free_request(a, (struct esas2r_request *)rq);

		/* Pick up possible error code from above */
		if (count < 0)
			return count;
	}

	/* copy (part of) the response back to the caller */
	if (off > a->fs_api_buffer_size)
		return 0;

	if (count + off > a->fs_api_buffer_size)
		count = a->fs_api_buffer_size - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->fs_api_buffer + off, count);

	return count;
}
2044
/*
 * Handle a call to write firmware via FS_API.  At offset 0 the buffer
 * must start with a struct esas2r_ioctl_fs header; a DMA-coherent
 * staging buffer large enough for the whole command is (re)allocated if
 * needed, and the caller's bytes are then accumulated into it across
 * successive calls.  The command itself is issued later, when
 * esas2r_read_fs() is called at offset 0.
 *
 * Returns the number of bytes consumed from @buf, or a negative errno.
 */
int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	if (off == 0) {
		struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
		/* NOTE(review): fs->command.length originates in userspace;
		 * this addition could wrap for very large values — confirm
		 * an upper bound is enforced before or here. */
		u32 length = fs->command.length + offsetof(
			struct esas2r_ioctl_fs,
			data);

		/*
		 * Special case, for BEGIN commands, the length field
		 * is lying to us, so just get enough for the header.
		 */

		if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
			length = offsetof(struct esas2r_ioctl_fs, data);

		/*
		 * Beginning a command. We assume we'll get at least
		 * enough in the first write so we can look at the
		 * header and see how much we need to alloc.
		 */

		if (count < offsetof(struct esas2r_ioctl_fs, data))
			return -EINVAL;

		/* Allocate a buffer or use the existing buffer. */
		if (a->fs_api_buffer) {
			if (a->fs_api_buffer_size < length) {
				/* Free too-small buffer and get a new one */
				dma_free_coherent(&a->pcid->dev,
						  (size_t)a->fs_api_buffer_size,
						  a->fs_api_buffer,
						  (dma_addr_t)a->ppfs_api_buffer);

				goto re_allocate_buffer;
			}
		} else {
			/* label is entered both from the else branch and
			 * via the goto above after freeing the old buffer */
re_allocate_buffer:
			a->fs_api_buffer_size = length;

			a->fs_api_buffer = (u8 *)dma_alloc_coherent(
				&a->pcid->dev,
				(size_t)a->fs_api_buffer_size,
				(dma_addr_t *)&a->ppfs_api_buffer,
				GFP_KERNEL);
		}
	}

	/* an allocation failure above also lands here */
	if (!a->fs_api_buffer)
		return -ENOMEM;

	if (off > a->fs_api_buffer_size)
		return 0;

	if (count + off > a->fs_api_buffer_size)
		count = a->fs_api_buffer_size - off;

	if (count < 1)
		return 0;

	memcpy(a->fs_api_buffer + off, buf, count);

	return count;
}
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
new file mode 100644
index 000000000000..9bf285df58dd
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -0,0 +1,254 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_log.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
46/*
47 * this module within the driver is tasked with providing logging functionality.
48 * the event_log_level module parameter controls the level of messages that are
49 * written to the system log. the default level of messages that are written
50 * are critical and warning messages. if other types of messages are desired,
51 * one simply needs to load the module with the correct value for the
52 * event_log_level module parameter. for example:
53 *
54 * insmod <module> event_log_level=1
55 *
56 * will load the module and only critical events will be written by this module
57 * to the system log. if critical, warning, and information-level messages are
58 * desired, the correct value for the event_log_level module parameter
59 * would be as follows:
60 *
61 * insmod <module> event_log_level=3
62 */
63
/* Maximum size of one fully formatted log message (prefix + text). */
#define EVENT_LOG_BUFF_SIZE 1024

/* Current verbosity: messages whose level is numerically greater than
 * this are suppressed (see esas2r_log_master). */
static long event_log_level = ESAS2R_LOG_DFLT;

/* NOTE(review): S_IRUSR is already contained in S_IRUGO; the extra flag
 * is redundant but harmless. Parameter is read-only via sysfs. */
module_param(event_log_level, long, S_IRUGO | S_IRUSR);
MODULE_PARM_DESC(event_log_level,
		 "Specifies the level of events to report to the system log. Critical and warning level events are logged by default.");

/* A shared buffer to use for formatting messages. */
static char event_buffer[EVENT_LOG_BUFF_SIZE];

/* A lock to protect the shared buffer used for formatting messages. */
static DEFINE_SPINLOCK(event_buffer_lock);
77
78/**
79 * translates an esas2r-defined logging event level to a kernel logging level.
80 *
81 * @param [in] level the esas2r-defined logging event level to translate
82 *
83 * @return the corresponding kernel logging level.
84 */
85static const char *translate_esas2r_event_level_to_kernel(const long level)
86{
87 switch (level) {
88 case ESAS2R_LOG_CRIT:
89 return KERN_CRIT;
90
91 case ESAS2R_LOG_WARN:
92 return KERN_WARNING;
93
94 case ESAS2R_LOG_INFO:
95 return KERN_INFO;
96
97 case ESAS2R_LOG_DEBG:
98 case ESAS2R_LOG_TRCE:
99 default:
100 return KERN_DEBUG;
101 }
102}
103
104/**
105 * the master logging function. this function will format the message as
106 * outlined by the formatting string, the input device information and the
107 * substitution arguments and output the resulting string to the system log.
108 *
109 * @param [in] level the event log level of the message
110 * @param [in] dev the device information
111 * @param [in] format the formatting string for the message
112 * @param [in] args the substition arguments to the formatting string
113 *
114 * @return 0 on success, or -1 if an error occurred.
115 */
116static int esas2r_log_master(const long level,
117 const struct device *dev,
118 const char *format,
119 va_list args)
120{
121 if (level <= event_log_level) {
122 unsigned long flags = 0;
123 int retval = 0;
124 char *buffer = event_buffer;
125 size_t buflen = EVENT_LOG_BUFF_SIZE;
126 const char *fmt_nodev = "%s%s: ";
127 const char *fmt_dev = "%s%s [%s, %s, %s]";
128 const char *slevel =
129 translate_esas2r_event_level_to_kernel(level);
130
131 spin_lock_irqsave(&event_buffer_lock, flags);
132
133 if (buffer == NULL) {
134 spin_unlock_irqrestore(&event_buffer_lock, flags);
135 return -1;
136 }
137
138 memset(buffer, 0, buflen);
139
140 /*
141 * format the level onto the beginning of the string and do
142 * some pointer arithmetic to move the pointer to the point
143 * where the actual message can be inserted.
144 */
145
146 if (dev == NULL) {
147 snprintf(buffer, buflen, fmt_nodev, slevel,
148 ESAS2R_DRVR_NAME);
149 } else {
150 snprintf(buffer, buflen, fmt_dev, slevel,
151 ESAS2R_DRVR_NAME,
152 (dev->driver ? dev->driver->name : "unknown"),
153 (dev->bus ? dev->bus->name : "unknown"),
154 dev_name(dev));
155 }
156
157 buffer += strlen(event_buffer);
158 buflen -= strlen(event_buffer);
159
160 retval = vsnprintf(buffer, buflen, format, args);
161 if (retval < 0) {
162 spin_unlock_irqrestore(&event_buffer_lock, flags);
163 return -1;
164 }
165
166 /*
167 * Put a line break at the end of the formatted string so that
168 * we don't wind up with run-on messages. only append if there
169 * is enough space in the buffer.
170 */
171 if (strlen(event_buffer) < buflen)
172 strcat(buffer, "\n");
173
174 printk(event_buffer);
175
176 spin_unlock_irqrestore(&event_buffer_lock, flags);
177 }
178
179 return 0;
180}
181
182/**
183 * formats and logs a message to the system log.
184 *
185 * @param [in] level the event level of the message
186 * @param [in] format the formating string for the message
187 * @param [in] ... the substitution arguments to the formatting string
188 *
189 * @return 0 on success, or -1 if an error occurred.
190 */
191int esas2r_log(const long level, const char *format, ...)
192{
193 int retval = 0;
194 va_list args;
195
196 va_start(args, format);
197
198 retval = esas2r_log_master(level, NULL, format, args);
199
200 va_end(args);
201
202 return retval;
203}
204
205/**
206 * formats and logs a message to the system log. this message will include
207 * device information.
208 *
209 * @param [in] level the event level of the message
210 * @param [in] dev the device information
211 * @param [in] format the formatting string for the message
212 * @param [in] ... the substitution arguments to the formatting string
213 *
214 * @return 0 on success, or -1 if an error occurred.
215 */
216int esas2r_log_dev(const long level,
217 const struct device *dev,
218 const char *format,
219 ...)
220{
221 int retval = 0;
222 va_list args;
223
224 va_start(args, format);
225
226 retval = esas2r_log_master(level, dev, format, args);
227
228 va_end(args);
229
230 return retval;
231}
232
/**
 * dumps a buffer to the system log as a hex dump, subject to the same
 * event_log_level filtering as the other logging functions.
 *
 * @param [in] level the event level of the message
 * @param [in] buf the buffer to dump
 * @param [in] len the number of bytes to dump
 *
 * @return always 1 (the dump is skipped silently when the level is
 *         filtered out).
 */
int esas2r_log_hexdump(const long level,
		       const void *buf,
		       size_t len)
{
	if (level <= event_log_level) {
		print_hex_dump(translate_esas2r_event_level_to_kernel(level),
			       "", DUMP_PREFIX_OFFSET, 16, 1, buf,
			       len, true);
	}

	return 1;
}
diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h
new file mode 100644
index 000000000000..7b6397bb5b94
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.h
@@ -0,0 +1,118 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_log.h
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#ifndef __esas2r_log_h__
45#define __esas2r_log_h__
46
47struct device;
48
/* Event severity levels, in increasing verbosity.  A message is emitted
 * when its level is numerically <= the configured event_log_level
 * module parameter (see esas2r_log.c). */
enum {
	ESAS2R_LOG_NONE = 0,    /* no events logged */
	ESAS2R_LOG_CRIT = 1,    /* critical events */
	ESAS2R_LOG_WARN = 2,    /* warning events */
	ESAS2R_LOG_INFO = 3,    /* info events */
	ESAS2R_LOG_DEBG = 4,    /* debugging events */
	ESAS2R_LOG_TRCE = 5,    /* tracing events */

#ifdef ESAS2R_TRACE
	ESAS2R_LOG_DFLT = ESAS2R_LOG_TRCE
#else
	ESAS2R_LOG_DFLT = ESAS2R_LOG_WARN
#endif
};
63
64int esas2r_log(const long level, const char *format, ...);
65int esas2r_log_dev(const long level,
66 const struct device *dev,
67 const char *format,
68 ...);
69int esas2r_log_hexdump(const long level,
70 const void *buf,
71 size_t len);
72
73/*
74 * the following macros are provided specifically for debugging and tracing
75 * messages. esas2r_debug() is provided for generic non-hardware layer
76 * debugging and tracing events. esas2r_hdebug is provided specifically for
77 * hardware layer debugging and tracing events.
78 */
79
80#ifdef ESAS2R_DEBUG
81#define esas2r_debug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
82#define esas2r_hdebug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
83#else
84#define esas2r_debug(f, args ...)
85#define esas2r_hdebug(f, args ...)
86#endif /* ESAS2R_DEBUG */
87
88/*
89 * the following macros are provided in order to trace the driver and catch
90 * some more serious bugs. be warned, enabling these macros may *severely*
91 * impact performance.
92 */
93
94#ifdef ESAS2R_TRACE
95#define esas2r_bugon() \
96 do { \
97 esas2r_log(ESAS2R_LOG_TRCE, "esas2r_bugon() called in %s:%d" \
98 " - dumping stack and stopping kernel", __func__, \
99 __LINE__); \
100 dump_stack(); \
101 BUG(); \
102 } while (0)
103
104#define esas2r_trace_enter() esas2r_log(ESAS2R_LOG_TRCE, "entered %s (%s:%d)", \
105 __func__, __FILE__, __LINE__)
106#define esas2r_trace_exit() esas2r_log(ESAS2R_LOG_TRCE, "exited %s (%s:%d)", \
107 __func__, __FILE__, __LINE__)
108#define esas2r_trace(f, args ...) esas2r_log(ESAS2R_LOG_TRCE, "(%s:%s:%d): " \
109 f, __func__, __FILE__, __LINE__, \
110 ## args)
111#else
112#define esas2r_bugon()
113#define esas2r_trace_enter()
114#define esas2r_trace_exit()
115#define esas2r_trace(f, args ...)
116#endif /* ESAS2R_TRACE */
117
118#endif /* __esas2r_log_h__ */
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
new file mode 100644
index 000000000000..4abf1272e1eb
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -0,0 +1,2032 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_main.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
46MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver");
47MODULE_AUTHOR("ATTO Technology, Inc.");
48MODULE_LICENSE("GPL");
49MODULE_VERSION(ESAS2R_VERSION_STR);
50
/* global definitions */

/* Count of adapters claimed so far; also used as each host's unique_id. */
static int found_adapters;
struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS];

/* NOTE(review): VDA event port/socket constants — usage is not visible in
 * this file; presumably network ports for VDA event delivery. Confirm. */
#define ESAS2R_VDA_EVENT_PORT1 54414
#define ESAS2R_VDA_EVENT_PORT2 54415
#define ESAS2R_VDA_EVENT_SOCK_COUNT 2
59
60static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj)
61{
62 struct device *dev = container_of(kobj, struct device, kobj);
63 struct Scsi_Host *host = class_to_shost(dev);
64
65 return (struct esas2r_adapter *)host->hostdata;
66}
67
68static ssize_t read_fw(struct file *file, struct kobject *kobj,
69 struct bin_attribute *attr,
70 char *buf, loff_t off, size_t count)
71{
72 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
73
74 return esas2r_read_fw(a, buf, off, count);
75}
76
77static ssize_t write_fw(struct file *file, struct kobject *kobj,
78 struct bin_attribute *attr,
79 char *buf, loff_t off, size_t count)
80{
81 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
82
83 return esas2r_write_fw(a, buf, off, count);
84}
85
86static ssize_t read_fs(struct file *file, struct kobject *kobj,
87 struct bin_attribute *attr,
88 char *buf, loff_t off, size_t count)
89{
90 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
91
92 return esas2r_read_fs(a, buf, off, count);
93}
94
95static ssize_t write_fs(struct file *file, struct kobject *kobj,
96 struct bin_attribute *attr,
97 char *buf, loff_t off, size_t count)
98{
99 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
100 int length = min(sizeof(struct esas2r_ioctl_fs), count);
101 int result = 0;
102
103 result = esas2r_write_fs(a, buf, off, count);
104
105 if (result < 0)
106 result = 0;
107
108 return length;
109}
110
111static ssize_t read_vda(struct file *file, struct kobject *kobj,
112 struct bin_attribute *attr,
113 char *buf, loff_t off, size_t count)
114{
115 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
116
117 return esas2r_read_vda(a, buf, off, count);
118}
119
120static ssize_t write_vda(struct file *file, struct kobject *kobj,
121 struct bin_attribute *attr,
122 char *buf, loff_t off, size_t count)
123{
124 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
125
126 return esas2r_write_vda(a, buf, off, count);
127}
128
129static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
130 struct bin_attribute *attr,
131 char *buf, loff_t off, size_t count)
132{
133 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
134 int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE);
135
136 memcpy(buf, a->nvram, length);
137 return length;
138}
139
/*
 * sysfs "live_nvram" write handler: push a complete SAS NVRAM image to
 * the adapter via a firmware request.  Returns 'count' on success,
 * -ENOMEM if no request could be allocated, -EFAULT on write failure.
 */
static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
				struct bin_attribute *attr,
				char *buf, loff_t off, size_t count)
{
	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
	struct esas2r_request *rq;
	int result = -EFAULT;	/* reported if esas2r_write_params() fails */

	rq = esas2r_alloc_request(a);
	if (rq == NULL)
		return -ENOMEM;

	/* NOTE(review): 'off' and short 'count' are ignored; buf is assumed
	 * to contain a full esas2r_sas_nvram image - confirm with callers. */
	if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
		result = count;

	esas2r_free_request(a, rq);

	return result;
}
159
160static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
161 struct bin_attribute *attr,
162 char *buf, loff_t off, size_t count)
163{
164 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
165
166 esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf);
167
168 return sizeof(struct esas2r_sas_nvram);
169}
170
/*
 * sysfs "hw" read handler: execute the ioctl image previously staged by
 * write_hw() and copy the result (clamped to PAGE_SIZE) back to userspace.
 */
static ssize_t read_hw(struct file *file, struct kobject *kobj,
		       struct bin_attribute *attr,
		       char *buf, loff_t off, size_t count)
{
	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
	int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE);

	/* The staging buffer is allocated by write_hw(); reading first
	 * means there is nothing to execute. */
	if (!a->local_atto_ioctl)
		return -ENOMEM;

	/* NOTE(review): -ENOMEM is an odd errno for an ioctl failure;
	 * consider -EIO - confirm against userspace tool expectations. */
	if (handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS)
		return -ENOMEM;

	memcpy(buf, a->local_atto_ioctl, length);

	return length;
}
188
189static ssize_t write_hw(struct file *file, struct kobject *kobj,
190 struct bin_attribute *attr,
191 char *buf, loff_t off, size_t count)
192{
193 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
194 int length = min(sizeof(struct atto_ioctl), count);
195
196 if (!a->local_atto_ioctl) {
197 a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
198 GFP_KERNEL);
199 if (a->local_atto_ioctl == NULL) {
200 esas2r_log(ESAS2R_LOG_WARN,
201 "write_hw kzalloc failed for %d bytes",
202 sizeof(struct atto_ioctl));
203 return -ENOMEM;
204 }
205 }
206
207 memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl));
208 memcpy(a->local_atto_ioctl, buf, length);
209
210 return length;
211}
212
/*
 * Declare a root-only read/write sysfs binary attribute named "_name",
 * wired to the read_<_name>() / write_<_name>() handlers above.
 */
#define ESAS2R_RW_BIN_ATTR(_name) \
	struct bin_attribute bin_attr_ ## _name = { \
		.attr = \
		{ .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \
		.size = 0, \
		.read = read_ ## _name, \
		.write = write_ ## _name }

ESAS2R_RW_BIN_ATTR(fw);
ESAS2R_RW_BIN_ATTR(fs);
ESAS2R_RW_BIN_ATTR(vda);
ESAS2R_RW_BIN_ATTR(hw);
ESAS2R_RW_BIN_ATTR(live_nvram);

/* Read-only attribute exposing factory-default NVRAM (no write handler). */
struct bin_attribute bin_attr_default_nvram = {
	.attr = { .name = "default_nvram", .mode = S_IRUGO },
	.size = 0,
	.read = read_default_nvram,
	.write = NULL
};
233
/*
 * SCSI midlayer host template.  The can_queue, cmd_per_lun, sg_tablesize
 * and max_sectors defaults below are overridden per-host in esas2r_probe()
 * from the corresponding module parameters.
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.show_info = esas2r_show_info,
	.name = ESAS2R_LONGNAME,
	.release = esas2r_release,
	.info = esas2r_info,
	.ioctl = esas2r_ioctl,
	.queuecommand = esas2r_queuecommand,
	.eh_abort_handler = esas2r_eh_abort,
	.eh_device_reset_handler = esas2r_device_reset,
	.eh_bus_reset_handler = esas2r_bus_reset,
	.eh_host_reset_handler = esas2r_host_reset,
	.eh_target_reset_handler = esas2r_target_reset,
	.can_queue = 128,
	.this_id = -1,
	.sg_tablesize = SCSI_MAX_SG_SEGMENTS,
	.cmd_per_lun =
		ESAS2R_DEFAULT_CMD_PER_LUN,
	.present = 0,
	.unchecked_isa_dma = 0,
	.use_clustering = ENABLE_CLUSTERING,
	.emulated = 0,
	.proc_name = ESAS2R_DRVR_NAME,
	.slave_configure = esas2r_slave_configure,
	.slave_alloc = esas2r_slave_alloc,
	.slave_destroy = esas2r_slave_destroy,
	.change_queue_depth = esas2r_change_queue_depth,
	.change_queue_type = esas2r_change_queue_type,
	.max_sectors = 0xFFFF,
};
264
265int sgl_page_size = 512;
266module_param(sgl_page_size, int, 0);
267MODULE_PARM_DESC(sgl_page_size,
268 "Scatter/gather list (SGL) page size in number of S/G "
269 "entries. If your application is doing a lot of very large "
270 "transfers, you may want to increase the SGL page size. "
271 "Default 512.");
272
273int num_sg_lists = 1024;
274module_param(num_sg_lists, int, 0);
275MODULE_PARM_DESC(num_sg_lists,
276 "Number of scatter/gather lists. Default 1024.");
277
278int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
279module_param(sg_tablesize, int, 0);
280MODULE_PARM_DESC(sg_tablesize,
281 "Maximum number of entries in a scatter/gather table.");
282
283int num_requests = 256;
284module_param(num_requests, int, 0);
285MODULE_PARM_DESC(num_requests,
286 "Number of requests. Default 256.");
287
288int num_ae_requests = 4;
289module_param(num_ae_requests, int, 0);
290MODULE_PARM_DESC(num_ae_requests,
291 "Number of VDA asynchromous event requests. Default 4.");
292
293int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
294module_param(cmd_per_lun, int, 0);
295MODULE_PARM_DESC(cmd_per_lun,
296 "Maximum number of commands per LUN. Default "
297 DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) ".");
298
299int can_queue = 128;
300module_param(can_queue, int, 0);
301MODULE_PARM_DESC(can_queue,
302 "Maximum number of commands per adapter. Default 128.");
303
304int esas2r_max_sectors = 0xFFFF;
305module_param(esas2r_max_sectors, int, 0);
306MODULE_PARM_DESC(esas2r_max_sectors,
307 "Maximum number of disk sectors in a single data transfer. "
308 "Default 65535 (largest possible setting).");
309
310int interrupt_mode = 1;
311module_param(interrupt_mode, int, 0);
312MODULE_PARM_DESC(interrupt_mode,
313 "Defines the interrupt mode to use. 0 for legacy"
314 ", 1 for MSI. Default is MSI (1).");
315
/*
 * PCI IDs claimed by this driver: ATTO vendor/device 0x0049 with the
 * subsystem device ID selecting the specific board variant.  The
 * all-zero entry terminates the table.
 */
static struct pci_device_id
	esas2r_pci_table[] = {
	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049,
	  0,
	  0, 0 },
	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004A,
	  0,
	  0, 0 },
	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004B,
	  0,
	  0, 0 },
	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004C,
	  0,
	  0, 0 },
	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004D,
	  0,
	  0, 0 },
	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004E,
	  0,
	  0, 0 },
	{ 0, 0, 0, 0,
	  0,
	  0, 0 }
};

MODULE_DEVICE_TABLE(pci, esas2r_pci_table);
342
static int
esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id);

static void
esas2r_remove(struct pci_dev *pcid);

/* PCI driver glue: probe/remove plus power-management entry points. */
static struct pci_driver
	esas2r_pci_driver = {
	.name = ESAS2R_DRVR_NAME,
	.id_table = esas2r_pci_table,
	.probe = esas2r_probe,
	.remove = esas2r_remove,
	.suspend = esas2r_suspend,
	.resume = esas2r_resume,
};
358
359static int esas2r_probe(struct pci_dev *pcid,
360 const struct pci_device_id *id)
361{
362 struct Scsi_Host *host = NULL;
363 struct esas2r_adapter *a;
364 int err;
365
366 size_t host_alloc_size = sizeof(struct esas2r_adapter)
367 + ((num_requests) +
368 1) * sizeof(struct esas2r_request);
369
370 esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
371 "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
372 pcid->vendor,
373 pcid->device,
374 pcid->subsystem_vendor,
375 pcid->subsystem_device);
376
377 esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
378 "before pci_enable_device() "
379 "enable_cnt: %d",
380 pcid->enable_cnt.counter);
381
382 err = pci_enable_device(pcid);
383 if (err != 0) {
384 esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
385 "pci_enable_device() FAIL (%d)",
386 err);
387 return -ENODEV;
388 }
389
390 esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
391 "pci_enable_device() OK");
392 esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
393 "after pci_device_enable() enable_cnt: %d",
394 pcid->enable_cnt.counter);
395
396 host = scsi_host_alloc(&driver_template, host_alloc_size);
397 if (host == NULL) {
398 esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
399 return -ENODEV;
400 }
401
402 memset(host->hostdata, 0, host_alloc_size);
403
404 a = (struct esas2r_adapter *)host->hostdata;
405
406 esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);
407
408 /* override max LUN and max target id */
409
410 host->max_id = ESAS2R_MAX_ID + 1;
411 host->max_lun = 255;
412
413 /* we can handle 16-byte CDbs */
414
415 host->max_cmd_len = 16;
416
417 host->can_queue = can_queue;
418 host->cmd_per_lun = cmd_per_lun;
419 host->this_id = host->max_id + 1;
420 host->max_channel = 0;
421 host->unique_id = found_adapters;
422 host->sg_tablesize = sg_tablesize;
423 host->max_sectors = esas2r_max_sectors;
424
425 /* set to bus master for BIOses that don't do it for us */
426
427 esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");
428
429 pci_set_master(pcid);
430
431 if (!esas2r_init_adapter(host, pcid, found_adapters)) {
432 esas2r_log(ESAS2R_LOG_CRIT,
433 "unable to initialize device at PCI bus %x:%x",
434 pcid->bus->number,
435 pcid->devfn);
436
437 esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
438 "scsi_host_put() called");
439
440 scsi_host_put(host);
441
442 return 0;
443
444 }
445
446 esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
447 host->hostdata);
448
449 pci_set_drvdata(pcid, host);
450
451 esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");
452
453 err = scsi_add_host(host, &pcid->dev);
454
455 if (err) {
456 esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
457 esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
458 "scsi_add_host() FAIL");
459
460 esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
461 "scsi_host_put() called");
462
463 scsi_host_put(host);
464
465 esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
466 "pci_set_drvdata(%p, NULL) called",
467 pcid);
468
469 pci_set_drvdata(pcid, NULL);
470
471 return -ENODEV;
472 }
473
474
475 esas2r_fw_event_on(a);
476
477 esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
478 "scsi_scan_host() called");
479
480 scsi_scan_host(host);
481
482 /* Add sysfs binary files */
483 if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw))
484 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
485 "Failed to create sysfs binary file: fw");
486 else
487 a->sysfs_fw_created = 1;
488
489 if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs))
490 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
491 "Failed to create sysfs binary file: fs");
492 else
493 a->sysfs_fs_created = 1;
494
495 if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda))
496 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
497 "Failed to create sysfs binary file: vda");
498 else
499 a->sysfs_vda_created = 1;
500
501 if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw))
502 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
503 "Failed to create sysfs binary file: hw");
504 else
505 a->sysfs_hw_created = 1;
506
507 if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram))
508 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
509 "Failed to create sysfs binary file: live_nvram");
510 else
511 a->sysfs_live_nvram_created = 1;
512
513 if (sysfs_create_bin_file(&host->shost_dev.kobj,
514 &bin_attr_default_nvram))
515 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
516 "Failed to create sysfs binary file: default_nvram");
517 else
518 a->sysfs_default_nvram_created = 1;
519
520 found_adapters++;
521
522 return 0;
523}
524
/*
 * PCI remove entry point: tear down the host associated with this PCI
 * device and, when the last adapter is gone, release driver-wide state.
 */
static void esas2r_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host;
	int index;

	if (pdev == NULL) {
		esas2r_log(ESAS2R_LOG_WARN, "esas2r_remove pdev==NULL");
		return;
	}

	host = pci_get_drvdata(pdev);

	if (host == NULL) {
		/*
		 * this can happen if pci_set_drvdata was already called
		 * to clear the host pointer. if this is the case, we
		 * are okay; this channel has already been cleaned up.
		 */

		return;
	}

	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
		       "esas2r_remove(%p) called; "
		       "host:%p", pdev,
		       host);

	/* esas2r_cleanup() returns the adapter index, or < 0 if unknown */
	index = esas2r_cleanup(host);

	if (index < 0)
		esas2r_log_dev(ESAS2R_LOG_WARN, &(pdev->dev),
			       "unknown host in %s",
			       __func__);

	found_adapters--;

	/* if this was the last adapter, clean up the rest of the driver */

	if (found_adapters == 0)
		esas2r_cleanup(NULL);
}
566
567static int __init esas2r_init(void)
568{
569 int i;
570
571 esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
572
573 /* verify valid parameters */
574
575 if (can_queue < 1) {
576 esas2r_log(ESAS2R_LOG_WARN,
577 "warning: can_queue must be at least 1, value "
578 "forced.");
579 can_queue = 1;
580 } else if (can_queue > 2048) {
581 esas2r_log(ESAS2R_LOG_WARN,
582 "warning: can_queue must be no larger than 2048, "
583 "value forced.");
584 can_queue = 2048;
585 }
586
587 if (cmd_per_lun < 1) {
588 esas2r_log(ESAS2R_LOG_WARN,
589 "warning: cmd_per_lun must be at least 1, value "
590 "forced.");
591 cmd_per_lun = 1;
592 } else if (cmd_per_lun > 2048) {
593 esas2r_log(ESAS2R_LOG_WARN,
594 "warning: cmd_per_lun must be no larger than "
595 "2048, value forced.");
596 cmd_per_lun = 2048;
597 }
598
599 if (sg_tablesize < 32) {
600 esas2r_log(ESAS2R_LOG_WARN,
601 "warning: sg_tablesize must be at least 32, "
602 "value forced.");
603 sg_tablesize = 32;
604 }
605
606 if (esas2r_max_sectors < 1) {
607 esas2r_log(ESAS2R_LOG_WARN,
608 "warning: esas2r_max_sectors must be at least "
609 "1, value forced.");
610 esas2r_max_sectors = 1;
611 } else if (esas2r_max_sectors > 0xffff) {
612 esas2r_log(ESAS2R_LOG_WARN,
613 "warning: esas2r_max_sectors must be no larger "
614 "than 0xffff, value forced.");
615 esas2r_max_sectors = 0xffff;
616 }
617
618 sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1);
619
620 if (sgl_page_size < SGL_PG_SZ_MIN)
621 sgl_page_size = SGL_PG_SZ_MIN;
622 else if (sgl_page_size > SGL_PG_SZ_MAX)
623 sgl_page_size = SGL_PG_SZ_MAX;
624
625 if (num_sg_lists < NUM_SGL_MIN)
626 num_sg_lists = NUM_SGL_MIN;
627 else if (num_sg_lists > NUM_SGL_MAX)
628 num_sg_lists = NUM_SGL_MAX;
629
630 if (num_requests < NUM_REQ_MIN)
631 num_requests = NUM_REQ_MIN;
632 else if (num_requests > NUM_REQ_MAX)
633 num_requests = NUM_REQ_MAX;
634
635 if (num_ae_requests < NUM_AE_MIN)
636 num_ae_requests = NUM_AE_MIN;
637 else if (num_ae_requests > NUM_AE_MAX)
638 num_ae_requests = NUM_AE_MAX;
639
640 /* set up other globals */
641
642 for (i = 0; i < MAX_ADAPTERS; i++)
643 esas2r_adapters[i] = NULL;
644
645 /* initialize */
646
647 driver_template.module = THIS_MODULE;
648
649 if (pci_register_driver(&esas2r_pci_driver) != 0)
650 esas2r_log(ESAS2R_LOG_CRIT, "pci_register_driver FAILED");
651 else
652 esas2r_log(ESAS2R_LOG_INFO, "pci_register_driver() OK");
653
654 if (!found_adapters) {
655 pci_unregister_driver(&esas2r_pci_driver);
656 esas2r_cleanup(NULL);
657
658 esas2r_log(ESAS2R_LOG_CRIT,
659 "driver will not be loaded because no ATTO "
660 "%s devices were found",
661 ESAS2R_DRVR_NAME);
662 return -1;
663 } else {
664 esas2r_log(ESAS2R_LOG_INFO, "found %d adapters",
665 found_adapters);
666 }
667
668 return 0;
669}
670
/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */
static const struct file_operations esas2r_proc_fops = {
	.compat_ioctl = esas2r_proc_ioctl,
	.unlocked_ioctl = esas2r_proc_ioctl,
};

/* Host whose adapter services ATTOnode ioctls (set in esas2r_info()). */
static struct Scsi_Host *esas2r_proc_host;
/* Char-dev major for the ATTOnode; <= 0 means not (or failed) registered. */
static int esas2r_proc_major;
679
/* Dispatch an ATTOnode ioctl to the adapter owned by esas2r_proc_host. */
long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	return esas2r_ioctl_handler(esas2r_proc_host->hostdata,
				    (int)cmd, (void __user *)arg);
}
685
/*
 * Module exit: remove the ATTOnode proc entry and char device if they
 * were registered (see esas2r_info()), then unregister the PCI driver.
 */
static void __exit esas2r_exit(void)
{
	esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);

	if (esas2r_proc_major > 0) {
		esas2r_log(ESAS2R_LOG_INFO, "unregister proc");

		remove_proc_entry(ATTONODE_NAME,
				  esas2r_proc_host->hostt->proc_dir);
		unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME);

		esas2r_proc_major = 0;
	}

	esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called");

	pci_unregister_driver(&esas2r_pci_driver);
}
704
/*
 * /proc/scsi show_info handler: print driver/firmware versions, adapter
 * model and SAS address, and the list of discovered target IDs.
 */
int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;

	struct esas2r_target *t;
	int dev_count = 0;

	esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no);

	seq_printf(m, ESAS2R_LONGNAME "\n"
		   "Driver version: "ESAS2R_VERSION_STR "\n"
		   "Flash version: %s\n"
		   "Firmware version: %s\n"
		   "Copyright "ESAS2R_COPYRIGHT_YEARS "\n"
		   "http://www.attotech.com\n"
		   "\n",
		   a->flash_rev,
		   a->fw_rev[0] ? a->fw_rev : "(none)");


	seq_printf(m, "Adapter information:\n"
		   "--------------------\n"
		   "Model: %s\n"
		   "SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n",
		   esas2r_get_model_name(a),
		   a->nvram->sas_addr[0],
		   a->nvram->sas_addr[1],
		   a->nvram->sas_addr[2],
		   a->nvram->sas_addr[3],
		   a->nvram->sas_addr[4],
		   a->nvram->sas_addr[5],
		   a->nvram->sas_addr[6],
		   a->nvram->sas_addr[7]);

	seq_puts(m, "\n"
		 "Discovered devices:\n"
		 "\n"
		 "   #  Target ID\n"
		 "---------------\n");

	/* the target ID is the index of the entry in the target database */
	for (t = a->targetdb; t < a->targetdb_end; t++)
		if (t->buffered_target_state == TS_PRESENT) {
			seq_printf(m, " %3d   %3d\n",
				   ++dev_count,
				   (u16)(uintptr_t)(t - a->targetdb));
		}

	if (dev_count == 0)
		seq_puts(m, "none\n");

	seq_puts(m, "\n");
	return 0;

}
759
/*
 * scsi_host_template .release handler: clean up the adapter, release its
 * IRQ if one was assigned, and unregister the host.
 * NOTE(review): scsi_unregister() is a legacy midlayer interface - confirm
 * it is still appropriate for this kernel version.
 */
int esas2r_release(struct Scsi_Host *sh)
{
	esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
		       "esas2r_release() called");

	esas2r_cleanup(sh);
	if (sh->irq)
		free_irq(sh->irq, NULL);
	scsi_unregister(sh);
	return 0;
}
771
772const char *esas2r_info(struct Scsi_Host *sh)
773{
774 struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
775 static char esas2r_info_str[512];
776
777 esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
778 "esas2r_info() called");
779
780 /*
781 * if we haven't done so already, register as a char driver
782 * and stick a node under "/proc/scsi/esas2r/ATTOnode"
783 */
784
785 if (esas2r_proc_major <= 0) {
786 esas2r_proc_host = sh;
787
788 esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME,
789 &esas2r_proc_fops);
790
791 esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev),
792 "register_chrdev (major %d)",
793 esas2r_proc_major);
794
795 if (esas2r_proc_major > 0) {
796 struct proc_dir_entry *pde;
797
798 pde = proc_create(ATTONODE_NAME, 0,
799 sh->hostt->proc_dir,
800 &esas2r_proc_fops);
801
802 if (!pde) {
803 esas2r_log_dev(ESAS2R_LOG_WARN,
804 &(sh->shost_gendev),
805 "failed to create_proc_entry");
806 esas2r_proc_major = -1;
807 }
808 }
809 }
810
811 sprintf(esas2r_info_str,
812 ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)"
813 " driver version: "ESAS2R_VERSION_STR " firmware version: "
814 "%s\n",
815 a->pcid->bus->number, a->pcid->devfn, a->pcid->irq,
816 a->fw_rev[0] ? a->fw_rev : "(none)");
817
818 return esas2r_info_str;
819}
820
/*
 * Callback for building a request scatter/gather list.
 *
 * Returns the DMA length of the next chunk and stores its bus address in
 * *addr.  'cur_offset' is how many bytes the caller has consumed so far;
 * 'exp_offset' is how many bytes have been handed out.  A return of 0
 * means the S/G list is exhausted (or the caller skipped ahead, which is
 * not expected).
 */
static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr)
{
	u32 len;

	if (likely(sgc->cur_offset == sgc->exp_offset)) {
		/*
		 * the normal case: caller used all bytes from previous call, so
		 * expected offset is the same as the current offset.
		 */

		if (sgc->sgel_count < sgc->num_sgel) {
			/* retrieve next segment, except for first time */
			/* exp_offset > NULL distinguishes the very first call,
			 * when cur_sgel already points at the first segment */
			if (sgc->exp_offset > (u8 *)0) {
				/* advance current segment */
				sgc->cur_sgel = sg_next(sgc->cur_sgel);
				++(sgc->sgel_count);
			}


			len = sg_dma_len(sgc->cur_sgel);
			(*addr) = sg_dma_address(sgc->cur_sgel);

			/* save the total # bytes returned to caller so far */
			sgc->exp_offset += len;

		} else {
			/* no segments left */
			len = 0;
		}
	} else if (sgc->cur_offset < sgc->exp_offset) {
		/*
		 * caller did not use all bytes from previous call. need to
		 * compute the address based on current segment.
		 */

		len = sg_dma_len(sgc->cur_sgel);
		(*addr) = sg_dma_address(sgc->cur_sgel);

		/* rewind exp_offset to the start of the current segment */
		sgc->exp_offset -= len;

		/* calculate PA based on prev segment address and offsets */
		*addr = *addr +
			(sgc->cur_offset - sgc->exp_offset);

		sgc->exp_offset += len;

		/* re-calculate length based on offset */
		len = lower_32_bits(
			sgc->exp_offset - sgc->cur_offset);
	} else {   /* if ( sgc->cur_offset > sgc->exp_offset ) */
		   /*
		    * we don't expect the caller to skip ahead.
		    * cur_offset will never exceed the len we return
		    */
		len = 0;
	}

	return len;
}
880
/*
 * SCSI midlayer queuecommand entry point: translate a scsi_cmnd into a
 * VDA request, build its scatter/gather list, and start it on the
 * firmware.  Returns 0 or SCSI_MLQUEUE_HOST_BUSY to request a retry.
 */
int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct esas2r_adapter *a =
		(struct esas2r_adapter *)cmd->device->host->hostdata;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	unsigned bufflen;

	/* Assume success, if it fails we will fix the result later. */
	cmd->result = DID_OK << 16;

	/* a degraded adapter accepts no I/O; fail the command immediately */
	if (unlikely(a->flags & AF_DEGRADED_MODE)) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	rq = esas2r_alloc_request(a);
	if (unlikely(rq == NULL)) {
		esas2r_debug("esas2r_alloc_request failed");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	rq->cmd = cmd;
	bufflen = scsi_bufflen(cmd);

	/* record the data direction in the VDA request flags */
	if (likely(bufflen != 0)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
	}

	memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
	rq->vrq->scsi.length = cpu_to_le32(bufflen);
	rq->target_id = cmd->device->id;
	rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
	rq->sense_buf = cmd->sense_buffer;
	rq->sense_len = SCSI_SENSE_BUFFERSIZE;

	esas2r_sgc_init(&sgc, a, rq, NULL);

	sgc.length = bufflen;
	sgc.cur_offset = NULL;

	sgc.cur_sgel = scsi_sglist(cmd);
	sgc.exp_offset = NULL;
	sgc.num_sgel = scsi_dma_map(cmd);
	sgc.sgel_count = 0;

	/* a negative count means the DMA mapping failed */
	if (unlikely(sgc.num_sgel < 0)) {
		esas2r_free_request(a, rq);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;

	if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
		scsi_dma_unmap(cmd);
		esas2r_free_request(a, rq);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
		     (int)cmd->device->lun);

	esas2r_start_request(a, rq);

	return 0;
}
951
/*
 * Completion callback for task-management (abort/reset) requests:
 * publish the final status through the caller-supplied pointer (which
 * the EH thread polls), then return the request to the pool.
 */
static void complete_task_management_request(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	(*rq->task_management_status_ptr) = rq->req_stat;
	esas2r_free_request(a, rq);
}
958
959/**
960 * Searches the specified queue for the specified queue for the command
961 * to abort.
962 *
963 * @param [in] a
964 * @param [in] abort_request
965 * @param [in] cmd
966 * t
967 * @return 0 on failure, 1 if command was not found, 2 if command was found
968 */
969static int esas2r_check_active_queue(struct esas2r_adapter *a,
970 struct esas2r_request **abort_request,
971 struct scsi_cmnd *cmd,
972 struct list_head *queue)
973{
974 bool found = false;
975 struct esas2r_request *ar = *abort_request;
976 struct esas2r_request *rq;
977 struct list_head *element, *next;
978
979 list_for_each_safe(element, next, queue) {
980
981 rq = list_entry(element, struct esas2r_request, req_list);
982
983 if (rq->cmd == cmd) {
984
985 /* Found the request. See what to do with it. */
986 if (queue == &a->active_list) {
987 /*
988 * We are searching the active queue, which
989 * means that we need to send an abort request
990 * to the firmware.
991 */
992 ar = esas2r_alloc_request(a);
993 if (ar == NULL) {
994 esas2r_log_dev(ESAS2R_LOG_WARN,
995 &(a->host->shost_gendev),
996 "unable to allocate an abort request for cmd %p",
997 cmd);
998 return 0; /* Failure */
999 }
1000
1001 /*
1002 * Task management request must be formatted
1003 * with a lock held.
1004 */
1005 ar->sense_len = 0;
1006 ar->vrq->scsi.length = 0;
1007 ar->target_id = rq->target_id;
1008 ar->vrq->scsi.flags |= cpu_to_le32(
1009 (u8)le32_to_cpu(rq->vrq->scsi.flags));
1010
1011 memset(ar->vrq->scsi.cdb, 0,
1012 sizeof(ar->vrq->scsi.cdb));
1013
1014 ar->vrq->scsi.flags |= cpu_to_le32(
1015 FCP_CMND_TRM);
1016 ar->vrq->scsi.u.abort_handle =
1017 rq->vrq->scsi.handle;
1018 } else {
1019 /*
1020 * The request is pending but not active on
1021 * the firmware. Just free it now and we'll
1022 * report the successful abort below.
1023 */
1024 list_del_init(&rq->req_list);
1025 esas2r_free_request(a, rq);
1026 }
1027
1028 found = true;
1029 break;
1030 }
1031
1032 }
1033
1034 if (!found)
1035 return 1; /* Not found */
1036
1037 return 2; /* found */
1038
1039
1040}
1041
/*
 * SCSI EH abort handler: locate the command on the defer or active queue.
 * A deferred command is freed directly; an active one requires an abort
 * request to the firmware, whose completion is polled for below.
 */
int esas2r_eh_abort(struct scsi_cmnd *cmd)
{
	struct esas2r_adapter *a =
		(struct esas2r_adapter *)cmd->device->host->hostdata;
	struct esas2r_request *abort_request = NULL;
	unsigned long flags;
	struct list_head *queue;
	int result;

	esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);

	/* degraded adapter: nothing in flight; just report the abort */
	if (a->flags & AF_DEGRADED_MODE) {
		cmd->result = DID_ABORT << 16;

		scsi_set_resid(cmd, 0);

		cmd->scsi_done(cmd);

		return 0;
	}

	spin_lock_irqsave(&a->queue_lock, flags);

	/*
	 * Run through the defer and active queues looking for the request
	 * to abort.
	 */

	queue = &a->defer_list;

check_active_queue:

	result = esas2r_check_active_queue(a, &abort_request, cmd, queue);

	if (!result) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		return FAILED;
	} else if (result == 2 && (queue == &a->defer_list)) {
		/* NOTE(review): result == 2 means "found"; falling through
		 * to search the active list after finding it on the defer
		 * list looks suspicious - possibly result == 1 (not found)
		 * was intended here. Confirm. */
		queue = &a->active_list;
		goto check_active_queue;
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);

	if (abort_request) {
		u8 task_management_status = RS_PENDING;

		/*
		 * the request is already active, so we need to tell
		 * the firmware to abort it and wait for the response.
		 */

		abort_request->comp_cb = complete_task_management_request;
		abort_request->task_management_status_ptr =
			&task_management_status;

		esas2r_start_request(a, abort_request);

		if (atomic_read(&a->disable_cnt) == 0)
			esas2r_do_deferred_processes(a);

		/* poll until the completion callback publishes a status */
		while (task_management_status == RS_PENDING)
			msleep(10);

		/*
		 * Once we get here, the original request will have been
		 * completed by the firmware and the abort request will have
		 * been cleaned up. we're done!
		 */

		return SUCCESS;
	}

	/*
	 * If we get here, either we found the inactive request and
	 * freed it, or we didn't find it at all. Either way, success!
	 */

	cmd->result = DID_ABORT << 16;

	scsi_set_resid(cmd, 0);

	cmd->scsi_done(cmd);

	return SUCCESS;
}
1128
/*
 * Common helper for the host-reset and bus-reset EH handlers: kick off
 * the appropriate reset and poll until the adapter clears AF_OS_RESET.
 */
static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
{
	struct esas2r_adapter *a =
		(struct esas2r_adapter *)cmd->device->host->hostdata;

	if (a->flags & AF_DEGRADED_MODE)
		return FAILED;

	if (host_reset)
		esas2r_reset_adapter(a);
	else
		esas2r_reset_bus(a);

	/* above call sets the AF_OS_RESET flag. wait for it to clear. */

	while (a->flags & AF_OS_RESET) {
		msleep(10);

		/* the reset may drop the adapter into degraded mode */
		if (a->flags & AF_DEGRADED_MODE)
			return FAILED;
	}

	if (a->flags & AF_DEGRADED_MODE)
		return FAILED;

	return SUCCESS;
}
1156
/* SCSI EH host-reset entry point: full adapter reset. */
int esas2r_host_reset(struct scsi_cmnd *cmd)
{
	esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd);

	return esas2r_host_bus_reset(cmd, true);
}
1163
/* SCSI EH bus-reset entry point: resets the bus, not the whole adapter. */
int esas2r_bus_reset(struct scsi_cmnd *cmd)
{
	esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd);

	return esas2r_host_bus_reset(cmd, false);
}
1170
1171static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)
1172{
1173 struct esas2r_adapter *a =
1174 (struct esas2r_adapter *)cmd->device->host->hostdata;
1175 struct esas2r_request *rq;
1176 u8 task_management_status = RS_PENDING;
1177 bool completed;
1178
1179 if (a->flags & AF_DEGRADED_MODE)
1180 return FAILED;
1181
1182retry:
1183 rq = esas2r_alloc_request(a);
1184 if (rq == NULL) {
1185 if (target_reset) {
1186 esas2r_log(ESAS2R_LOG_CRIT,
1187 "unable to allocate a request for a "
1188 "target reset (%d)!",
1189 cmd->device->id);
1190 } else {
1191 esas2r_log(ESAS2R_LOG_CRIT,
1192 "unable to allocate a request for a "
1193 "device reset (%d:%d)!",
1194 cmd->device->id,
1195 cmd->device->lun);
1196 }
1197
1198
1199 return FAILED;
1200 }
1201
1202 rq->target_id = cmd->device->id;
1203 rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
1204 rq->req_stat = RS_PENDING;
1205
1206 rq->comp_cb = complete_task_management_request;
1207 rq->task_management_status_ptr = &task_management_status;
1208
1209 if (target_reset) {
1210 esas2r_debug("issuing target reset (%p) to id %d", rq,
1211 cmd->device->id);
1212 completed = esas2r_send_task_mgmt(a, rq, 0x20);
1213 } else {
1214 esas2r_debug("issuing device reset (%p) to id %d lun %d", rq,
1215 cmd->device->id, cmd->device->lun);
1216 completed = esas2r_send_task_mgmt(a, rq, 0x10);
1217 }
1218
1219 if (completed) {
1220 /* Task management cmd completed right away, need to free it. */
1221
1222 esas2r_free_request(a, rq);
1223 } else {
1224 /*
1225 * Wait for firmware to complete the request. Completion
1226 * callback will free it.
1227 */
1228 while (task_management_status == RS_PENDING)
1229 msleep(10);
1230 }
1231
1232 if (a->flags & AF_DEGRADED_MODE)
1233 return FAILED;
1234
1235 if (task_management_status == RS_BUSY) {
1236 /*
1237 * Busy, probably because we are flashing. Wait a bit and
1238 * try again.
1239 */
1240 msleep(100);
1241 goto retry;
1242 }
1243
1244 return SUCCESS;
1245}
1246
/* SCSI EH device (LUN) reset entry point. */
int esas2r_device_reset(struct scsi_cmnd *cmd)
{
	esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd);

	return esas2r_dev_targ_reset(cmd, false);

}
1254
/* SCSI EH target reset entry point. */
int esas2r_target_reset(struct scsi_cmnd *cmd)
{
	esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd);

	return esas2r_dev_targ_reset(cmd, true);
}
1261
/*
 * SCSI host template change_queue_depth hook.  Adjusts the device's
 * queue depth (keeping its current tag type) and returns the depth
 * actually in effect.  @reason is unused here.
 */
int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason)
{
	esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth);

	scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth);

	return dev->queue_depth;
}
1270
1271int esas2r_change_queue_type(struct scsi_device *dev, int type)
1272{
1273 esas2r_log(ESAS2R_LOG_INFO, "change_queue_type %p, %d", dev, type);
1274
1275 if (dev->tagged_supported) {
1276 scsi_set_tag_type(dev, type);
1277
1278 if (type)
1279 scsi_activate_tcq(dev, dev->queue_depth);
1280 else
1281 scsi_deactivate_tcq(dev, dev->queue_depth);
1282 } else {
1283 type = 0;
1284 }
1285
1286 return type;
1287}
1288
/* SCSI host template slave_alloc hook; no per-device setup is needed. */
int esas2r_slave_alloc(struct scsi_device *dev)
{
	return 0;
}
1293
1294int esas2r_slave_configure(struct scsi_device *dev)
1295{
1296 esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
1297 "esas2r_slave_configure()");
1298
1299 if (dev->tagged_supported) {
1300 scsi_set_tag_type(dev, MSG_SIMPLE_TAG);
1301 scsi_activate_tcq(dev, cmd_per_lun);
1302 } else {
1303 scsi_set_tag_type(dev, 0);
1304 scsi_deactivate_tcq(dev, cmd_per_lun);
1305 }
1306
1307 return 0;
1308}
1309
/* SCSI host template slave_destroy hook; logging only, no state to free. */
void esas2r_slave_destroy(struct scsi_device *dev)
{
	esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
		       "esas2r_slave_destroy()");
}
1315
/*
 * Log a failed VDA SCSI request at WARN level.  Quiet for successful
 * requests, for INQUIRY underruns, and for selection timeouts on
 * INQUIRY/REPORT LUNS (expected during discovery of absent devices).
 */
void esas2r_log_request_failure(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	u8 reqstatus = rq->req_stat;

	if (reqstatus == RS_SUCCESS)
		return;

	/* Only SCSI-function requests are logged here. */
	if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
		if (reqstatus == RS_SCSI_ERROR) {
			/* Include ASC/ASCQ only when enough sense data arrived. */
			if (rq->func_rsp.scsi_rsp.sense_len >= 13) {
				esas2r_log(ESAS2R_LOG_WARN,
					   "request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x",
					   rq->sense_buf[2], rq->sense_buf[12],
					   rq->sense_buf[13],
					   rq->vrq->scsi.cdb[0]);
			} else {
				esas2r_log(ESAS2R_LOG_WARN,
					   "request failure - SCSI error CDB:%x\n",
					   rq->vrq->scsi.cdb[0]);
			}
		} else if ((rq->vrq->scsi.cdb[0] != INQUIRY
			    && rq->vrq->scsi.cdb[0] != REPORT_LUNS)
			   || (reqstatus != RS_SEL
			       && reqstatus != RS_SEL2)) {
			/* Suppressed only when BOTH a discovery CDB and a
			 * selection-timeout status coincide. */
			if ((reqstatus == RS_UNDERRUN) &&
			    (rq->vrq->scsi.cdb[0] == INQUIRY)) {
				/* Don't log inquiry underruns */
			} else {
				esas2r_log(ESAS2R_LOG_WARN,
					   "request failure - cdb:%x reqstatus:%d target:%d",
					   rq->vrq->scsi.cdb[0], reqstatus,
					   rq->target_id);
			}
		}
	}
}
1353
1354void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
1355{
1356 u32 starttime;
1357 u32 timeout;
1358
1359 starttime = jiffies_to_msecs(jiffies);
1360 timeout = rq->timeout ? rq->timeout : 5000;
1361
1362 while (true) {
1363 esas2r_polled_interrupt(a);
1364
1365 if (rq->req_stat != RS_STARTED)
1366 break;
1367
1368 schedule_timeout_interruptible(msecs_to_jiffies(100));
1369
1370 if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
1371 esas2r_hdebug("request TMO");
1372 esas2r_bugon();
1373
1374 rq->req_stat = RS_TIMEOUT;
1375
1376 esas2r_local_reset_adapter(a);
1377 return;
1378 }
1379 }
1380}
1381
1382u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo)
1383{
1384 u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1);
1385 u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE;
1386
1387 if (a->window_base != base) {
1388 esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP,
1389 base | MVRPW1R_ENABLE);
1390 esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP);
1391 a->window_base = base;
1392 }
1393
1394 return offset;
1395}
1396
1397/* Read a block of data from chip memory */
1398bool esas2r_read_mem_block(struct esas2r_adapter *a,
1399 void *to,
1400 u32 from,
1401 u32 size)
1402{
1403 u8 *end = (u8 *)to;
1404
1405 while (size) {
1406 u32 len;
1407 u32 offset;
1408 u32 iatvr;
1409
1410 iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE);
1411
1412 esas2r_map_data_window(a, iatvr);
1413
1414 offset = from & (MW_DATA_WINDOW_SIZE - 1);
1415 len = size;
1416
1417 if (len > MW_DATA_WINDOW_SIZE - offset)
1418 len = MW_DATA_WINDOW_SIZE - offset;
1419
1420 from += len;
1421 size -= len;
1422
1423 while (len--) {
1424 *end++ = esas2r_read_data_byte(a, offset);
1425 offset++;
1426 }
1427 }
1428
1429 return true;
1430}
1431
/*
 * Byte-swap the multi-byte fields of a VDA management response from
 * firmware (little-endian) to CPU order, in place.  The layout of
 * @data depends on the VDAMGT @function that produced it.
 */
void esas2r_nuxi_mgt_data(u8 function, void *data)
{
	struct atto_vda_grp_info *g;
	struct atto_vda_devinfo *d;
	struct atto_vdapart_info *p;
	struct atto_vda_dh_info *h;
	struct atto_vda_metrics_info *m;
	struct atto_vda_schedule_info *s;
	struct atto_vda_buzzer_info *b;
	u8 i;

	switch (function) {
	case VDAMGT_BUZZER_INFO:
	case VDAMGT_BUZZER_SET:

		b = (struct atto_vda_buzzer_info *)data;

		b->duration = le32_to_cpu(b->duration);
		break;

	case VDAMGT_SCHEDULE_INFO:
	case VDAMGT_SCHEDULE_EVENT:

		s = (struct atto_vda_schedule_info *)data;

		s->id = le32_to_cpu(s->id);

		break;

	case VDAMGT_DEV_INFO:
	case VDAMGT_DEV_CLEAN:
	case VDAMGT_DEV_PT_INFO:
	case VDAMGT_DEV_FEATURES:
	case VDAMGT_DEV_PT_FEATURES:
	case VDAMGT_DEV_OPERATION:

		/* All device functions share the devinfo layout. */
		d = (struct atto_vda_devinfo *)data;

		d->capacity = le64_to_cpu(d->capacity);
		d->block_size = le32_to_cpu(d->block_size);
		d->ses_dev_index = le16_to_cpu(d->ses_dev_index);
		d->target_id = le16_to_cpu(d->target_id);
		d->lun = le16_to_cpu(d->lun);
		d->features = le16_to_cpu(d->features);
		break;

	case VDAMGT_GRP_INFO:
	case VDAMGT_GRP_CREATE:
	case VDAMGT_GRP_DELETE:
	case VDAMGT_ADD_STORAGE:
	case VDAMGT_MEMBER_ADD:
	case VDAMGT_GRP_COMMIT:
	case VDAMGT_GRP_REBUILD:
	case VDAMGT_GRP_COMMIT_INIT:
	case VDAMGT_QUICK_RAID:
	case VDAMGT_GRP_FEATURES:
	case VDAMGT_GRP_COMMIT_INIT_AUTOMAP:
	case VDAMGT_QUICK_RAID_INIT_AUTOMAP:
	case VDAMGT_SPARE_LIST:
	case VDAMGT_SPARE_ADD:
	case VDAMGT_SPARE_REMOVE:
	case VDAMGT_LOCAL_SPARE_ADD:
	case VDAMGT_GRP_OPERATION:

		/* All RAID-group functions share the grp_info layout. */
		g = (struct atto_vda_grp_info *)data;

		g->capacity = le64_to_cpu(g->capacity);
		g->block_size = le32_to_cpu(g->block_size);
		g->interleave = le32_to_cpu(g->interleave);
		g->features = le16_to_cpu(g->features);

		/* Member table is a fixed 32-entry array of u16. */
		for (i = 0; i < 32; i++)
			g->members[i] = le16_to_cpu(g->members[i]);

		break;

	case VDAMGT_PART_INFO:
	case VDAMGT_PART_MAP:
	case VDAMGT_PART_UNMAP:
	case VDAMGT_PART_AUTOMAP:
	case VDAMGT_PART_SPLIT:
	case VDAMGT_PART_MERGE:

		p = (struct atto_vdapart_info *)data;

		p->part_size = le64_to_cpu(p->part_size);
		p->start_lba = le32_to_cpu(p->start_lba);
		p->block_size = le32_to_cpu(p->block_size);
		p->target_id = le16_to_cpu(p->target_id);
		break;

	case VDAMGT_DEV_HEALTH_REQ:

		h = (struct atto_vda_dh_info *)data;

		h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt);
		h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt);
		break;

	case VDAMGT_DEV_METRICS:

		m = (struct atto_vda_metrics_info *)data;

		for (i = 0; i < 32; i++)
			m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]);

		break;

	default:
		/* Other functions return no multi-byte fields to swap. */
		break;
	}
}
1544
1545void esas2r_nuxi_cfg_data(u8 function, void *data)
1546{
1547 struct atto_vda_cfg_init *ci;
1548
1549 switch (function) {
1550 case VDA_CFG_INIT:
1551 case VDA_CFG_GET_INIT:
1552 case VDA_CFG_GET_INIT2:
1553
1554 ci = (struct atto_vda_cfg_init *)data;
1555
1556 ci->date_time.year = le16_to_cpu(ci->date_time.year);
1557 ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size);
1558 ci->vda_version = le32_to_cpu(ci->vda_version);
1559 ci->epoch_time = le32_to_cpu(ci->epoch_time);
1560 ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel);
1561 ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend);
1562 break;
1563
1564 default:
1565 break;
1566 }
1567}
1568
/*
 * Byte-swap the multi-byte fields of a firmware async event (AE) from
 * little-endian to CPU order, in place.  The union member that is valid
 * is selected by the event header's type byte.
 */
void esas2r_nuxi_ae_data(union atto_vda_ae *ae)
{
	struct atto_vda_ae_raid *r = &ae->raid;
	struct atto_vda_ae_lu *l = &ae->lu;

	switch (ae->hdr.bytype) {
	case VDAAE_HDR_TYPE_RAID:

		r->dwflags = le32_to_cpu(r->dwflags);
		break;

	case VDAAE_HDR_TYPE_LU:

		l->dwevent = le32_to_cpu(l->dwevent);
		l->wphys_target_id = le16_to_cpu(l->wphys_target_id);
		l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id);

		/* The RAID variant of the id union is only present when the
		 * event is long enough to contain it. */
		if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
		    + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) {
			l->id.tgtlun_raid.dwinterleave
				= le32_to_cpu(l->id.tgtlun_raid.dwinterleave);
			l->id.tgtlun_raid.dwblock_size
				= le32_to_cpu(l->id.tgtlun_raid.dwblock_size);
		}

		break;

	case VDAAE_HDR_TYPE_DISK:
	default:
		/* No multi-byte fields to swap for these event types. */
		break;
	}
}
1601
1602void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
1603{
1604 unsigned long flags;
1605
1606 esas2r_rq_destroy_request(rq, a);
1607 spin_lock_irqsave(&a->request_lock, flags);
1608 list_add(&rq->comp_list, &a->avail_request);
1609 spin_unlock_irqrestore(&a->request_lock, flags);
1610}
1611
1612struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a)
1613{
1614 struct esas2r_request *rq;
1615 unsigned long flags;
1616
1617 spin_lock_irqsave(&a->request_lock, flags);
1618
1619 if (unlikely(list_empty(&a->avail_request))) {
1620 spin_unlock_irqrestore(&a->request_lock, flags);
1621 return NULL;
1622 }
1623
1624 rq = list_first_entry(&a->avail_request, struct esas2r_request,
1625 comp_list);
1626 list_del(&rq->comp_list);
1627 spin_unlock_irqrestore(&a->request_lock, flags);
1628 esas2r_rq_init_request(rq, a);
1629
1630 return rq;
1631
1632}
1633
1634void esas2r_complete_request_cb(struct esas2r_adapter *a,
1635 struct esas2r_request *rq)
1636{
1637 esas2r_debug("completing request %p\n", rq);
1638
1639 scsi_dma_unmap(rq->cmd);
1640
1641 if (unlikely(rq->req_stat != RS_SUCCESS)) {
1642 esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id,
1643 rq->req_stat,
1644 rq->func_rsp.scsi_rsp.scsi_stat,
1645 rq->cmd);
1646
1647 rq->cmd->result =
1648 ((esas2r_req_status_to_error(rq->req_stat) << 16)
1649 | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
1650
1651 if (rq->req_stat == RS_UNDERRUN)
1652 scsi_set_resid(rq->cmd,
1653 le32_to_cpu(rq->func_rsp.scsi_rsp.
1654 residual_length));
1655 else
1656 scsi_set_resid(rq->cmd, 0);
1657 }
1658
1659 rq->cmd->scsi_done(rq->cmd);
1660
1661 esas2r_free_request(a, rq);
1662}
1663
1664/* Run tasklet to handle stuff outside of interrupt context. */
1665void esas2r_adapter_tasklet(unsigned long context)
1666{
1667 struct esas2r_adapter *a = (struct esas2r_adapter *)context;
1668
1669 if (unlikely(a->flags2 & AF2_TIMER_TICK)) {
1670 esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK);
1671 esas2r_timer_tick(a);
1672 }
1673
1674 if (likely(a->flags2 & AF2_INT_PENDING)) {
1675 esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING);
1676 esas2r_adapter_interrupt(a);
1677 }
1678
1679 if (esas2r_is_tasklet_pending(a))
1680 esas2r_do_tasklet_tasks(a);
1681
1682 if (esas2r_is_tasklet_pending(a)
1683 || (a->flags2 & AF2_INT_PENDING)
1684 || (a->flags2 & AF2_TIMER_TICK)) {
1685 esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
1686 esas2r_schedule_tasklet(a);
1687 } else {
1688 esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
1689 }
1690}
1691
1692static void esas2r_timer_callback(unsigned long context);
1693
1694void esas2r_kickoff_timer(struct esas2r_adapter *a)
1695{
1696 init_timer(&a->timer);
1697
1698 a->timer.function = esas2r_timer_callback;
1699 a->timer.data = (unsigned long)a;
1700 a->timer.expires = jiffies +
1701 msecs_to_jiffies(100);
1702
1703 add_timer(&a->timer);
1704}
1705
/*
 * Timer expiry handler: flag a tick for the tasklet, schedule it, and
 * re-arm the timer so ticks keep coming every 100 ms.
 */
static void esas2r_timer_callback(unsigned long context)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)context;

	esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK);

	esas2r_schedule_tasklet(a);

	esas2r_kickoff_timer(a);
}
1716
1717/*
1718 * Firmware events need to be handled outside of interrupt context
1719 * so we schedule a delayed_work to handle them.
1720 */
1721
1722static void
1723esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event)
1724{
1725 unsigned long flags;
1726 struct esas2r_adapter *a = fw_event->a;
1727
1728 spin_lock_irqsave(&a->fw_event_lock, flags);
1729 list_del(&fw_event->list);
1730 kfree(fw_event);
1731 spin_unlock_irqrestore(&a->fw_event_lock, flags);
1732}
1733
/*
 * Suppress processing of queued firmware events; the worker checks
 * fw_events_off under fw_event_lock and discards events while set.
 */
void
esas2r_fw_event_off(struct esas2r_adapter *a)
{
	unsigned long flags;

	spin_lock_irqsave(&a->fw_event_lock, flags);
	a->fw_events_off = 1;
	spin_unlock_irqrestore(&a->fw_event_lock, flags);
}
1743
/* Re-enable processing of queued firmware events (see esas2r_fw_event_off). */
void
esas2r_fw_event_on(struct esas2r_adapter *a)
{
	unsigned long flags;

	spin_lock_irqsave(&a->fw_event_lock, flags);
	a->fw_events_off = 0;
	spin_unlock_irqrestore(&a->fw_event_lock, flags);
}
1753
1754static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id)
1755{
1756 int ret;
1757 struct scsi_device *scsi_dev;
1758
1759 scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
1760
1761 if (scsi_dev) {
1762 esas2r_log_dev(
1763 ESAS2R_LOG_WARN,
1764 &(scsi_dev->
1765 sdev_gendev),
1766 "scsi device already exists at id %d", target_id);
1767
1768 scsi_device_put(scsi_dev);
1769 } else {
1770 esas2r_log_dev(
1771 ESAS2R_LOG_INFO,
1772 &(a->host->
1773 shost_gendev),
1774 "scsi_add_device() called for 0:%d:0",
1775 target_id);
1776
1777 ret = scsi_add_device(a->host, 0, target_id, 0);
1778 if (ret) {
1779 esas2r_log_dev(
1780 ESAS2R_LOG_CRIT,
1781 &(a->host->
1782 shost_gendev),
1783 "scsi_add_device failed with %d for id %d",
1784 ret, target_id);
1785 }
1786 }
1787}
1788
1789static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id)
1790{
1791 struct scsi_device *scsi_dev;
1792
1793 scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
1794
1795 if (scsi_dev) {
1796 scsi_device_set_state(scsi_dev, SDEV_OFFLINE);
1797
1798 esas2r_log_dev(
1799 ESAS2R_LOG_INFO,
1800 &(scsi_dev->
1801 sdev_gendev),
1802 "scsi_remove_device() called for 0:%d:0",
1803 target_id);
1804
1805 scsi_remove_device(scsi_dev);
1806
1807 esas2r_log_dev(
1808 ESAS2R_LOG_INFO,
1809 &(scsi_dev->
1810 sdev_gendev),
1811 "scsi_device_put() called");
1812
1813 scsi_device_put(scsi_dev);
1814 } else {
1815 esas2r_log_dev(
1816 ESAS2R_LOG_WARN,
1817 &(a->host->shost_gendev),
1818 "no target found at id %d",
1819 target_id);
1820 }
1821}
1822
1823/*
1824 * Sends a firmware asynchronous event to anyone who happens to be
1825 * listening on the defined ATTO VDA event ports.
1826 */
/*
 * Log a firmware async event: map its header type to a human-readable
 * description and hexdump the raw event at WARN level.  (The function
 * name reflects the original intent of forwarding to ATTO VDA event
 * ports; as visible here it only logs.)
 */
static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event)
{
	struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data;
	char *type;

	switch (ae->vda_ae.hdr.bytype) {
	case VDAAE_HDR_TYPE_RAID:
		type = "RAID group state change";
		break;

	case VDAAE_HDR_TYPE_LU:
		type = "Mapped destination LU change";
		break;

	case VDAAE_HDR_TYPE_DISK:
		type = "Physical disk inventory change";
		break;

	case VDAAE_HDR_TYPE_RESET:
		type = "Firmware reset";
		break;

	case VDAAE_HDR_TYPE_LOG_INFO:
		type = "Event Log message (INFO level)";
		break;

	case VDAAE_HDR_TYPE_LOG_WARN:
		type = "Event Log message (WARN level)";
		break;

	case VDAAE_HDR_TYPE_LOG_CRIT:
		type = "Event Log message (CRIT level)";
		break;

	case VDAAE_HDR_TYPE_LOG_FAIL:
		type = "Event Log message (FAIL level)";
		break;

	case VDAAE_HDR_TYPE_NVC:
		type = "NVCache change";
		break;

	case VDAAE_HDR_TYPE_TLG_INFO:
		type = "Time stamped log message (INFO level)";
		break;

	case VDAAE_HDR_TYPE_TLG_WARN:
		type = "Time stamped log message (WARN level)";
		break;

	case VDAAE_HDR_TYPE_TLG_CRIT:
		type = "Time stamped log message (CRIT level)";
		break;

	case VDAAE_HDR_TYPE_PWRMGT:
		type = "Power management";
		break;

	case VDAAE_HDR_TYPE_MUTE:
		type = "Mute button pressed";
		break;

	case VDAAE_HDR_TYPE_DEV:
		type = "Device attribute change";
		break;

	default:
		type = "Unknown";
		break;
	}

	esas2r_log(ESAS2R_LOG_WARN,
		   "An async event of type \"%s\" was received from the firmware. The event contents are:",
		   type);
	/* Dump only bylength bytes — the actual size of this event. */
	esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae,
			   ae->vda_ae.hdr.bylength);

}
1905
/*
 * Delayed-work handler for queued firmware events.  Dispatches on the
 * event type to add/remove SCSI devices or log an async event, then
 * always frees the event.  Events are dropped while fw_events_off is
 * set (e.g. during shutdown/flash).
 */
static void
esas2r_firmware_event_work(struct work_struct *work)
{
	struct esas2r_fw_event_work *fw_event =
		container_of(work, struct esas2r_fw_event_work, work.work);

	struct esas2r_adapter *a = fw_event->a;

	/* For device events the first two bytes of data hold the target id
	 * (queued via esas2r_target_state_changed). */
	u16 target_id = *(u16 *)&fw_event->data[0];

	if (a->fw_events_off)
		goto done;

	switch (fw_event->type) {
	case fw_event_null:
		break; /* do nothing */

	case fw_event_lun_change:
		esas2r_remove_device(a, target_id);
		esas2r_add_device(a, target_id);
		break;

	case fw_event_present:
		esas2r_add_device(a, target_id);
		break;

	case fw_event_not_present:
		esas2r_remove_device(a, target_id);
		break;

	case fw_event_vda_ae:
		esas2r_send_ae_event(fw_event);
		break;
	}

done:
	esas2r_free_fw_event(fw_event);
}
1944
/*
 * Queue a firmware event for deferred handling by
 * esas2r_firmware_event_work().  May be called from atomic context
 * (GFP_ATOMIC allocation).  VDA async events are wrapped with a
 * signature and PCI location before copying.
 */
void esas2r_queue_fw_event(struct esas2r_adapter *a,
			   enum fw_event_type type,
			   void *data,
			   int data_sz)
{
	struct esas2r_fw_event_work *fw_event;
	unsigned long flags;

	fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC);
	if (!fw_event) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "esas2r_queue_fw_event failed to alloc");
		return;
	}

	if (type == fw_event_vda_ae) {
		struct esas2r_vda_ae *ae =
			(struct esas2r_vda_ae *)fw_event->data;

		ae->signature = ESAS2R_VDA_EVENT_SIG;
		ae->bus_number = a->pcid->bus->number;
		ae->devfn = a->pcid->devfn;
		memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae));
	} else {
		/* NOTE(review): assumes data_sz <= sizeof(fw_event->data);
		 * callers pass sizeof(u16) today — confirm if new callers
		 * are added. */
		memcpy(fw_event->data, data, data_sz);
	}

	fw_event->type = type;
	fw_event->a = a;

	/* Link onto the event list and kick the work under the same lock so
	 * esas2r_free_fw_event() always finds the entry on the list. */
	spin_lock_irqsave(&a->fw_event_lock, flags);
	list_add_tail(&fw_event->list, &a->fw_event_list);
	INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work);
	queue_delayed_work_on(
		smp_processor_id(), a->fw_event_q, &fw_event->work,
		msecs_to_jiffies(1));
	spin_unlock_irqrestore(&a->fw_event_lock, flags);
}
1983
1984void esas2r_target_state_changed(struct esas2r_adapter *a, u16 targ_id,
1985 u8 state)
1986{
1987 if (state == TS_LUN_CHANGE)
1988 esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id,
1989 sizeof(targ_id));
1990 else if (state == TS_PRESENT)
1991 esas2r_queue_fw_event(a, fw_event_present, &targ_id,
1992 sizeof(targ_id));
1993 else if (state == TS_NOT_PRESENT)
1994 esas2r_queue_fw_event(a, fw_event_not_present, &targ_id,
1995 sizeof(targ_id));
1996}
1997
1998/* Translate status to a Linux SCSI mid-layer error code */
1999int esas2r_req_status_to_error(u8 req_stat)
2000{
2001 switch (req_stat) {
2002 case RS_OVERRUN:
2003 case RS_UNDERRUN:
2004 case RS_SUCCESS:
2005 /*
2006 * NOTE: SCSI mid-layer wants a good status for a SCSI error, because
2007 * it will check the scsi_stat value in the completion anyway.
2008 */
2009 case RS_SCSI_ERROR:
2010 return DID_OK;
2011
2012 case RS_SEL:
2013 case RS_SEL2:
2014 return DID_NO_CONNECT;
2015
2016 case RS_RESET:
2017 return DID_RESET;
2018
2019 case RS_ABORTED:
2020 return DID_ABORT;
2021
2022 case RS_BUSY:
2023 return DID_BUS_BUSY;
2024 }
2025
2026 /* everything else is just an error. */
2027
2028 return DID_ERROR;
2029}
2030
/* Module entry/exit points for the esas2r driver. */
module_init(esas2r_init);
module_exit(esas2r_exit);
diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c
new file mode 100644
index 000000000000..e540a2fa3d15
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_targdb.c
@@ -0,0 +1,306 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_targdb.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
46void esas2r_targ_db_initialize(struct esas2r_adapter *a)
47{
48 struct esas2r_target *t;
49
50 for (t = a->targetdb; t < a->targetdb_end; t++) {
51 memset(t, 0, sizeof(struct esas2r_target));
52
53 t->target_state = TS_NOT_PRESENT;
54 t->buffered_target_state = TS_NOT_PRESENT;
55 t->new_target_state = TS_INVALID;
56 }
57}
58
/*
 * Mark every present target in the database as removed, optionally
 * notifying the OS layer (which queues SCSI device removal) for each.
 * The removal itself is done under mem_lock; notification is done
 * outside the lock since it allocates/queues work.
 */
void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify)
{
	struct esas2r_target *t;
	unsigned long flags;

	for (t = a->targetdb; t < a->targetdb_end; t++) {
		if (t->target_state != TS_PRESENT)
			continue;

		spin_lock_irqsave(&a->mem_lock, flags);
		esas2r_targ_db_remove(a, t);
		spin_unlock_irqrestore(&a->mem_lock, flags);

		if (notify) {
			esas2r_trace("remove id:%d", esas2r_targ_get_id(t,
									a));
			esas2r_target_state_changed(a, esas2r_targ_get_id(t,
									  a),
						    TS_NOT_PRESENT);
		}
	}
}
81
/*
 * Report buffered target-state changes to the OS layer.  For each
 * target whose live state differs from its buffered state, the buffer
 * is updated under mem_lock and the new state is reported outside the
 * lock.  Deferred entirely while discovery is pending.
 */
void esas2r_targ_db_report_changes(struct esas2r_adapter *a)
{
	struct esas2r_target *t;
	unsigned long flags;

	esas2r_trace_enter();

	/* Wait until discovery settles before reporting anything. */
	if (a->flags & AF_DISC_PENDING) {
		esas2r_trace_exit();
		return;
	}

	for (t = a->targetdb; t < a->targetdb_end; t++) {
		u8 state = TS_INVALID;

		spin_lock_irqsave(&a->mem_lock, flags);
		if (t->buffered_target_state != t->target_state)
			state = t->buffered_target_state = t->target_state;

		spin_unlock_irqrestore(&a->mem_lock, flags);
		if (state != TS_INVALID) {
			esas2r_trace("targ_db_report_changes:%d",
				     esas2r_targ_get_id(
					     t,
					     a));
			esas2r_trace("state:%d", state);

			esas2r_target_state_changed(a,
						    esas2r_targ_get_id(t,
								       a),
						    state);
		}
	}

	esas2r_trace_exit();
}
118
119struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
120 struct esas2r_disc_context *
121 dc)
122{
123 struct esas2r_target *t;
124
125 esas2r_trace_enter();
126
127 if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
128 esas2r_bugon();
129 esas2r_trace_exit();
130 return NULL;
131 }
132
133 t = a->targetdb + dc->curr_virt_id;
134
135 if (t->target_state == TS_PRESENT) {
136 esas2r_trace_exit();
137 return NULL;
138 }
139
140 esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name,
141 esas2r_targ_get_id(
142 t,
143 a));
144
145 if (dc->interleave == 0
146 || dc->block_size == 0) {
147 /* these are invalid values, don't create the target entry. */
148
149 esas2r_hdebug("invalid RAID group dimensions");
150
151 esas2r_trace_exit();
152
153 return NULL;
154 }
155
156 t->block_size = dc->block_size;
157 t->inter_byte = dc->interleave;
158 t->inter_block = dc->interleave / dc->block_size;
159 t->virt_targ_id = dc->curr_virt_id;
160 t->phys_targ_id = ESAS2R_TARG_ID_INV;
161
162 t->flags &= ~TF_PASS_THRU;
163 t->flags |= TF_USED;
164
165 t->identifier_len = 0;
166
167 t->target_state = TS_PRESENT;
168
169 return t;
170}
171
172struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
173 struct esas2r_disc_context *dc,
174 u8 *ident,
175 u8 ident_len)
176{
177 struct esas2r_target *t;
178
179 esas2r_trace_enter();
180
181 if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
182 esas2r_bugon();
183 esas2r_trace_exit();
184 return NULL;
185 }
186
187 /* see if we found this device before. */
188
189 t = esas2r_targ_db_find_by_ident(a, ident, ident_len);
190
191 if (t == NULL) {
192 t = a->targetdb + dc->curr_virt_id;
193
194 if (ident_len > sizeof(t->identifier)
195 || t->target_state == TS_PRESENT) {
196 esas2r_trace_exit();
197 return NULL;
198 }
199 }
200
201 esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a),
202 dc->curr_virt_id,
203 dc->curr_phys_id);
204
205 t->block_size = 0;
206 t->inter_byte = 0;
207 t->inter_block = 0;
208 t->virt_targ_id = dc->curr_virt_id;
209 t->phys_targ_id = dc->curr_phys_id;
210 t->identifier_len = ident_len;
211
212 memcpy(t->identifier, ident, ident_len);
213
214 t->flags |= TF_PASS_THRU | TF_USED;
215
216 t->target_state = TS_PRESENT;
217
218 return t;
219}
220
/*
 * Mark a target entry as no longer present.  Callers hold mem_lock
 * (see esas2r_targ_db_remove_all); only the state byte is changed here.
 */
void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t)
{
	esas2r_trace_enter();

	t->target_state = TS_NOT_PRESENT;

	esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a));

	esas2r_trace_exit();
}
231
232struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
233 u64 *sas_addr)
234{
235 struct esas2r_target *t;
236
237 for (t = a->targetdb; t < a->targetdb_end; t++)
238 if (t->sas_addr == *sas_addr)
239 return t;
240
241 return NULL;
242}
243
244struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
245 void *identifier,
246 u8 ident_len)
247{
248 struct esas2r_target *t;
249
250 for (t = a->targetdb; t < a->targetdb_end; t++) {
251 if (ident_len == t->identifier_len
252 && memcmp(&t->identifier[0], identifier,
253 ident_len) == 0)
254 return t;
255 }
256
257 return NULL;
258}
259
260u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id)
261{
262 u16 id = target_id + 1;
263
264 while (id < ESAS2R_MAX_TARGETS) {
265 struct esas2r_target *t = a->targetdb + id;
266
267 if (t->target_state == TS_PRESENT)
268 break;
269
270 id++;
271 }
272
273 return id;
274}
275
276struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
277 u16 virt_id)
278{
279 struct esas2r_target *t;
280
281 for (t = a->targetdb; t < a->targetdb_end; t++) {
282 if (t->target_state != TS_PRESENT)
283 continue;
284
285 if (t->virt_targ_id == virt_id)
286 return t;
287 }
288
289 return NULL;
290}
291
292u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a)
293{
294 u16 devcnt = 0;
295 struct esas2r_target *t;
296 unsigned long flags;
297
298 spin_lock_irqsave(&a->mem_lock, flags);
299 for (t = a->targetdb; t < a->targetdb_end; t++)
300 if (t->target_state == TS_PRESENT)
301 devcnt++;
302
303 spin_unlock_irqrestore(&a->mem_lock, flags);
304
305 return devcnt;
306}
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c
new file mode 100644
index 000000000000..f8ec6d636846
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_vda.c
@@ -0,0 +1,521 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_vda.c
3 * esas2r driver VDA firmware interface functions
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 */
8/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
9/*
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29 *
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 *
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42 */
43/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
44
45#include "esas2r.h"
46
/*
 * Highest VDA IOCTL interface version supported for each VDA function
 * code, indexed by function number.  Entries marked
 * ATTO_VDA_VER_UNSUPPORTED cause esas2r_process_vda_ioctl() to reject
 * the request with ATTO_STS_INV_VERSION.
 */
static u8 esas2r_vdaioctl_versions[] = {
	ATTO_VDA_VER_UNSUPPORTED,
	ATTO_VDA_FLASH_VER,
	ATTO_VDA_VER_UNSUPPORTED,
	ATTO_VDA_VER_UNSUPPORTED,
	ATTO_VDA_CLI_VER,
	ATTO_VDA_VER_UNSUPPORTED,
	ATTO_VDA_CFG_VER,
	ATTO_VDA_MGT_VER,
	ATTO_VDA_GSV_VER
};

/* forward declarations for helpers defined later in this file */
static void clear_vda_request(struct esas2r_request *rq);

static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
				      struct esas2r_request *rq);
/*
 * Prepare a VDA IOCTL request to be sent to the firmware.
 *
 * Validates the user request in @vi, fills in the VDA request in @rq,
 * builds any required S/G lists via @sgc and submits the request with
 * esas2r_start_request().  Returns true when the request was started;
 * returns false with vi->status set to an ATTO_STS_* code when
 * validation or SGL construction fails (the request is not submitted).
 *
 * esas2r_complete_vda_ioctl() is installed as the interrupt callback
 * to copy firmware response data back into @vi on completion.
 */
bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
			      struct atto_ioctl_vda *vi,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc)
{
	u32 datalen = 0;
	struct atto_vda_sge *firstsg = NULL;
	u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions);

	vi->status = ATTO_STS_SUCCESS;
	vi->vda_status = RS_PENDING;

	/* the function code indexes esas2r_vdaioctl_versions[] */
	if (vi->function >= vercnt) {
		vi->status = ATTO_STS_INV_FUNC;
		return false;
	}

	/* reject callers requesting a newer version than we support */
	if (vi->version > esas2r_vdaioctl_versions[vi->function]) {
		vi->status = ATTO_STS_INV_VERSION;
		return false;
	}

	if (a->flags & AF_DEGRADED_MODE) {
		vi->status = ATTO_STS_DEGRADED;
		return false;
	}

	/*
	 * Start from a clean VDA request for non-SCSI functions.  (Note
	 * VDA_FUNC_SCSI is not handled by the switch below and falls
	 * through to the default case.)
	 */
	if (vi->function != VDA_FUNC_SCSI)
		clear_vda_request(rq);

	rq->vrq->scsi.function = vi->function;
	rq->interrupt_cb = esas2r_complete_vda_ioctl;
	rq->interrupt_cx = vi;

	switch (vi->function) {
	case VDA_FUNC_FLASH:

		if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD
		    && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE
		    && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) {
			vi->status = ATTO_STS_INV_FUNC;
			return false;
		}

		/* FINFO transfers no data; leave datalen at 0 */
		if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO)
			datalen = vi->data_length;

		rq->vrq->flash.length = cpu_to_le32(datalen);
		rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;

		memcpy(rq->vrq->flash.data.file.file_name,
		       vi->cmd.flash.data.file.file_name,
		       sizeof(vi->cmd.flash.data.file.file_name));

		firstsg = rq->vrq->flash.data.file.sge;
		break;

	case VDA_FUNC_CLI:

		datalen = vi->data_length;

		rq->vrq->cli.cmd_rsp_len =
			cpu_to_le32(vi->cmd.cli.cmd_rsp_len);
		rq->vrq->cli.length = cpu_to_le32(datalen);

		firstsg = rq->vrq->cli.sge;
		break;

	case VDA_FUNC_MGT:
	{
		/*
		 * Rebase the S/G offset from the 'data' member of
		 * struct atto_ioctl_vda to the 'data' member of the
		 * embedded management command.
		 */
		u8 *cmdcurr_offset = sgc->cur_offset
				     - offsetof(struct atto_ioctl_vda, data)
				     + offsetof(struct atto_ioctl_vda, cmd)
				     + offsetof(struct atto_ioctl_vda_mgt_cmd,
						data);
		/*
		 * build the data payload SGL here first since
		 * esas2r_sgc_init() will modify the S/G list offset for the
		 * management SGL (which is built below where the data SGL is
		 * usually built).
		 */

		if (vi->data_length) {
			u32 payldlen = 0;

			if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ
			    || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) {
				/* these functions carry a separate payload */
				rq->vrq->mgt.payld_sglst_offset =
					(u8)offsetof(struct atto_vda_mgmt_req,
						     payld_sge);

				payldlen = vi->data_length;
				datalen = vi->cmd.mgt.data_length;
			} else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2
				   || vi->cmd.mgt.mgt_func ==
				   VDAMGT_DEV_INFO2_BYADDR) {
				datalen = vi->data_length;
				cmdcurr_offset = sgc->cur_offset;
			} else {
				vi->status = ATTO_STS_INV_PARAM;
				return false;
			}

			/* Setup the length so building the payload SGL works */
			rq->vrq->mgt.length = cpu_to_le32(datalen);

			if (payldlen) {
				rq->vrq->mgt.payld_length =
					cpu_to_le32(payldlen);

				esas2r_sgc_init(sgc, a, rq,
						rq->vrq->mgt.payld_sge);
				sgc->length = payldlen;

				if (!esas2r_build_sg_list(a, rq, sgc)) {
					vi->status = ATTO_STS_OUT_OF_RSRC;
					return false;
				}
			}
		} else {
			datalen = vi->cmd.mgt.data_length;

			rq->vrq->mgt.length = cpu_to_le32(datalen);
		}

		/*
		 * Now that the payload SGL is built, if any, setup to build
		 * the management SGL.
		 */
		firstsg = rq->vrq->mgt.sge;
		sgc->cur_offset = cmdcurr_offset;

		/* Finish initializing the management request. */
		rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func;
		rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation;
		rq->vrq->mgt.dev_index =
			cpu_to_le32(vi->cmd.mgt.dev_index);

		/* byte-swap the management data for the firmware */
		esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
		break;
	}

	case VDA_FUNC_CFG:

		/* cfg requests carry inline data only, never an SGL payload */
		if (vi->data_length
		    || vi->cmd.cfg.data_length == 0) {
			vi->status = ATTO_STS_INV_PARAM;
			return false;
		}

		if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) {
			vi->status = ATTO_STS_INV_FUNC;
			return false;
		}

		rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func;
		rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length);

		/* only VDA_CFG_GET_INIT is accepted beyond this point */
		if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
			memcpy(&rq->vrq->cfg.data,
			       &vi->cmd.cfg.data,
			       vi->cmd.cfg.data_length);

			esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
					     &rq->vrq->cfg.data);
		} else {
			vi->status = ATTO_STS_INV_FUNC;

			return false;
		}

		break;

	case VDA_FUNC_GSV:

		/* answered locally from the supported-versions table */
		vi->cmd.gsv.rsp_len = vercnt;

		memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions,
		       vercnt);

		vi->vda_status = RS_SUCCESS;
		break;

	default:

		vi->status = ATTO_STS_INV_FUNC;
		return false;
	}

	/* build the main data SGL if this function transfers data */
	if (datalen) {
		esas2r_sgc_init(sgc, a, rq, firstsg);
		sgc->length = datalen;

		if (!esas2r_build_sg_list(a, rq, sgc)) {
			vi->status = ATTO_STS_OUT_OF_RSRC;
			return false;
		}
	}

	esas2r_start_request(a, rq);

	return true;
}
268
/*
 * Interrupt callback for VDA IOCTL requests built by
 * esas2r_process_vda_ioctl().  Copies status and per-function response
 * data from the completed request back into the caller's
 * struct atto_ioctl_vda (stashed in rq->interrupt_cx).
 */
static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
				      struct esas2r_request *rq)
{
	struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx;

	vi->vda_status = rq->req_stat;

	switch (vi->function) {
	case VDA_FUNC_FLASH:

		if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO
		    || vi->cmd.flash.sub_func == VDA_FLASH_FREAD)
			vi->cmd.flash.data.file.file_size =
				le32_to_cpu(rq->func_rsp.flash_rsp.file_size);

		break;

	case VDA_FUNC_MGT:

		vi->cmd.mgt.scan_generation =
			rq->func_rsp.mgt_rsp.scan_generation;
		vi->cmd.mgt.dev_index = le16_to_cpu(
			rq->func_rsp.mgt_rsp.dev_index);

		/* requests without a payload report their data length here */
		if (vi->data_length == 0)
			vi->cmd.mgt.data_length =
				le32_to_cpu(rq->func_rsp.mgt_rsp.length);

		/* swap the response data back to host conventions */
		esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
		break;

	case VDA_FUNC_CFG:

		if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
			struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
			struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;

			/*
			 * NOTE(review): the cpu_to_le32() here looks
			 * suspicious - data_length is read directly (i.e.
			 * in host order) by esas2r_process_vda_ioctl();
			 * confirm behavior on big-endian hosts.
			 */
			cfg->data_length =
				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
			cfg->data.init.vda_version =
				le32_to_cpu(rsp->vda_version);
			cfg->data.init.fw_build = rsp->fw_build;

			/* render the firmware release as "major.minor" */
			sprintf((char *)&cfg->data.init.fw_release,
				"%1d.%02d",
				(int)LOBYTE(le16_to_cpu(rsp->fw_release)),
				(int)HIBYTE(le16_to_cpu(rsp->fw_release)));

			/*
			 * Builds whose low byte is 'A' report the build id
			 * as the version; others report the release string.
			 * (presumably 'A' marks special builds - TODO
			 * confirm with ATTO firmware documentation)
			 */
			if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
				cfg->data.init.fw_version =
					cfg->data.init.fw_build;
			else
				cfg->data.init.fw_version =
					cfg->data.init.fw_release;
		} else {
			esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
					     &vi->cmd.cfg.data);
		}

		break;

	case VDA_FUNC_CLI:

		vi->cmd.cli.cmd_rsp_len =
			le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len);
		break;

	default:

		break;
	}
}
341
342/* Build a flash VDA request. */
343void esas2r_build_flash_req(struct esas2r_adapter *a,
344 struct esas2r_request *rq,
345 u8 sub_func,
346 u8 cksum,
347 u32 addr,
348 u32 length)
349{
350 struct atto_vda_flash_req *vrq = &rq->vrq->flash;
351
352 clear_vda_request(rq);
353
354 rq->vrq->scsi.function = VDA_FUNC_FLASH;
355
356 if (sub_func == VDA_FLASH_BEGINW
357 || sub_func == VDA_FLASH_WRITE
358 || sub_func == VDA_FLASH_READ)
359 vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req,
360 data.sge);
361
362 vrq->length = cpu_to_le32(length);
363 vrq->flash_addr = cpu_to_le32(addr);
364 vrq->checksum = cksum;
365 vrq->sub_func = sub_func;
366}
367
368/* Build a VDA management request. */
369void esas2r_build_mgt_req(struct esas2r_adapter *a,
370 struct esas2r_request *rq,
371 u8 sub_func,
372 u8 scan_gen,
373 u16 dev_index,
374 u32 length,
375 void *data)
376{
377 struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;
378
379 clear_vda_request(rq);
380
381 rq->vrq->scsi.function = VDA_FUNC_MGT;
382
383 vrq->mgt_func = sub_func;
384 vrq->scan_generation = scan_gen;
385 vrq->dev_index = cpu_to_le16(dev_index);
386 vrq->length = cpu_to_le32(length);
387
388 if (vrq->length) {
389 if (a->flags & AF_LEGACY_SGE_MODE) {
390 vrq->sg_list_offset = (u8)offsetof(
391 struct atto_vda_mgmt_req, sge);
392
393 vrq->sge[0].length = cpu_to_le32(SGE_LAST | length);
394 vrq->sge[0].address = cpu_to_le64(
395 rq->vrq_md->phys_addr +
396 sizeof(union atto_vda_req));
397 } else {
398 vrq->sg_list_offset = (u8)offsetof(
399 struct atto_vda_mgmt_req, prde);
400
401 vrq->prde[0].ctl_len = cpu_to_le32(length);
402 vrq->prde[0].address = cpu_to_le64(
403 rq->vrq_md->phys_addr +
404 sizeof(union atto_vda_req));
405 }
406 }
407
408 if (data) {
409 esas2r_nuxi_mgt_data(sub_func, data);
410
411 memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
412 length);
413 }
414}
415
416/* Build a VDA asyncronous event (AE) request. */
417void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
418{
419 struct atto_vda_ae_req *vrq = &rq->vrq->ae;
420
421 clear_vda_request(rq);
422
423 rq->vrq->scsi.function = VDA_FUNC_AE;
424
425 vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
426
427 if (a->flags & AF_LEGACY_SGE_MODE) {
428 vrq->sg_list_offset =
429 (u8)offsetof(struct atto_vda_ae_req, sge);
430 vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
431 vrq->sge[0].address = cpu_to_le64(
432 rq->vrq_md->phys_addr +
433 sizeof(union atto_vda_req));
434 } else {
435 vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req,
436 prde);
437 vrq->prde[0].ctl_len = cpu_to_le32(vrq->length);
438 vrq->prde[0].address = cpu_to_le64(
439 rq->vrq_md->phys_addr +
440 sizeof(union atto_vda_req));
441 }
442}
443
444/* Build a VDA CLI request. */
445void esas2r_build_cli_req(struct esas2r_adapter *a,
446 struct esas2r_request *rq,
447 u32 length,
448 u32 cmd_rsp_len)
449{
450 struct atto_vda_cli_req *vrq = &rq->vrq->cli;
451
452 clear_vda_request(rq);
453
454 rq->vrq->scsi.function = VDA_FUNC_CLI;
455
456 vrq->length = cpu_to_le32(length);
457 vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len);
458 vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge);
459}
460
461/* Build a VDA IOCTL request. */
462void esas2r_build_ioctl_req(struct esas2r_adapter *a,
463 struct esas2r_request *rq,
464 u32 length,
465 u8 sub_func)
466{
467 struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;
468
469 clear_vda_request(rq);
470
471 rq->vrq->scsi.function = VDA_FUNC_IOCTL;
472
473 vrq->length = cpu_to_le32(length);
474 vrq->sub_func = sub_func;
475 vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge);
476}
477
478/* Build a VDA configuration request. */
479void esas2r_build_cfg_req(struct esas2r_adapter *a,
480 struct esas2r_request *rq,
481 u8 sub_func,
482 u32 length,
483 void *data)
484{
485 struct atto_vda_cfg_req *vrq = &rq->vrq->cfg;
486
487 clear_vda_request(rq);
488
489 rq->vrq->scsi.function = VDA_FUNC_CFG;
490
491 vrq->sub_func = sub_func;
492 vrq->length = cpu_to_le32(length);
493
494 if (data) {
495 esas2r_nuxi_cfg_data(sub_func, data);
496
497 memcpy(&vrq->data, data, length);
498 }
499}
500
501static void clear_vda_request(struct esas2r_request *rq)
502{
503 u32 handle = rq->vrq->scsi.handle;
504
505 memset(rq->vrq, 0, sizeof(*rq->vrq));
506
507 rq->vrq->scsi.handle = handle;
508
509 rq->req_stat = RS_PENDING;
510
511 /* since the data buffer is separate clear that too */
512
513 memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN);
514
515 /*
516 * Setup next and prev pointer in case the request is not going through
517 * esas2r_start_request().
518 */
519
520 INIT_LIST_HEAD(&rq->req_list);
521}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 7f4f790a3d71..b766f5aea584 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -583,7 +583,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
583 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); 583 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
584 if (likely(h->msix_vector)) 584 if (likely(h->msix_vector))
585 c->Header.ReplyQueue = 585 c->Header.ReplyQueue =
586 smp_processor_id() % h->nreply_queues; 586 raw_smp_processor_id() % h->nreply_queues;
587 } 587 }
588} 588}
589 589
@@ -1205,8 +1205,8 @@ static void complete_scsi_command(struct CommandList *cp)
1205 scsi_set_resid(cmd, ei->ResidualCnt); 1205 scsi_set_resid(cmd, ei->ResidualCnt);
1206 1206
1207 if (ei->CommandStatus == 0) { 1207 if (ei->CommandStatus == 0) {
1208 cmd->scsi_done(cmd);
1209 cmd_free(h, cp); 1208 cmd_free(h, cp);
1209 cmd->scsi_done(cmd);
1210 return; 1210 return;
1211 } 1211 }
1212 1212
@@ -1379,8 +1379,8 @@ static void complete_scsi_command(struct CommandList *cp)
1379 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", 1379 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1380 cp, ei->CommandStatus); 1380 cp, ei->CommandStatus);
1381 } 1381 }
1382 cmd->scsi_done(cmd);
1383 cmd_free(h, cp); 1382 cmd_free(h, cp);
1383 cmd->scsi_done(cmd);
1384} 1384}
1385 1385
1386static void hpsa_pci_unmap(struct pci_dev *pdev, 1386static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -2721,7 +2721,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
2721 } while (test_and_set_bit 2721 } while (test_and_set_bit
2722 (i & (BITS_PER_LONG - 1), 2722 (i & (BITS_PER_LONG - 1),
2723 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 2723 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2724 h->nr_allocs++;
2725 spin_unlock_irqrestore(&h->lock, flags); 2724 spin_unlock_irqrestore(&h->lock, flags);
2726 2725
2727 c = h->cmd_pool + i; 2726 c = h->cmd_pool + i;
@@ -2793,7 +2792,6 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2793 spin_lock_irqsave(&h->lock, flags); 2792 spin_lock_irqsave(&h->lock, flags);
2794 clear_bit(i & (BITS_PER_LONG - 1), 2793 clear_bit(i & (BITS_PER_LONG - 1),
2795 h->cmd_pool_bits + (i / BITS_PER_LONG)); 2794 h->cmd_pool_bits + (i / BITS_PER_LONG));
2796 h->nr_frees++;
2797 spin_unlock_irqrestore(&h->lock, flags); 2795 spin_unlock_irqrestore(&h->lock, flags);
2798} 2796}
2799 2797
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 981647989bfd..bc85e7244f40 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -98,8 +98,6 @@ struct ctlr_info {
98 struct ErrorInfo *errinfo_pool; 98 struct ErrorInfo *errinfo_pool;
99 dma_addr_t errinfo_pool_dhandle; 99 dma_addr_t errinfo_pool_dhandle;
100 unsigned long *cmd_pool_bits; 100 unsigned long *cmd_pool_bits;
101 int nr_allocs;
102 int nr_frees;
103 int scan_finished; 101 int scan_finished;
104 spinlock_t scan_lock; 102 spinlock_t scan_lock;
105 wait_queue_head_t scan_wait_queue; 103 wait_queue_head_t scan_wait_queue;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 6601e03520cc..36ac1c34ce97 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9990,6 +9990,20 @@ static struct pci_device_id ipr_pci_table[] = {
9990 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 }, 9990 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
9991 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, 9991 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9992 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 }, 9992 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
9993 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9994 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
9995 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9996 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
9997 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9998 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
9999 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10000 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10001 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10002 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10003 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10004 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10005 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10006 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
9993 { } 10007 { }
9994}; 10008};
9995MODULE_DEVICE_TABLE(pci, ipr_pci_table); 10009MODULE_DEVICE_TABLE(pci, ipr_pci_table);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 07a85ce41782..cad1483f05da 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -100,6 +100,13 @@
100#define IPR_SUBS_DEV_ID_57D6 0x03FC 100#define IPR_SUBS_DEV_ID_57D6 0x03FC
101#define IPR_SUBS_DEV_ID_57D7 0x03FF 101#define IPR_SUBS_DEV_ID_57D7 0x03FF
102#define IPR_SUBS_DEV_ID_57D8 0x03FE 102#define IPR_SUBS_DEV_ID_57D8 0x03FE
103#define IPR_SUBS_DEV_ID_57D9 0x046D
104#define IPR_SUBS_DEV_ID_57EB 0x0474
105#define IPR_SUBS_DEV_ID_57EC 0x0475
106#define IPR_SUBS_DEV_ID_57ED 0x0499
107#define IPR_SUBS_DEV_ID_57EE 0x049A
108#define IPR_SUBS_DEV_ID_57EF 0x049B
109#define IPR_SUBS_DEV_ID_57F0 0x049C
103#define IPR_NAME "ipr" 110#define IPR_NAME "ipr"
104 111
105/* 112/*
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index cd962da4a57a..85c77f6b802b 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -311,9 +311,9 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
311 &ihost->phys[phy_index]); 311 &ihost->phys[phy_index]);
312 312
313 assigned_phy_mask |= (1 << phy_index); 313 assigned_phy_mask |= (1 << phy_index);
314 phy_index++;
314 } 315 }
315 316
316 phy_index++;
317 } 317 }
318 318
319 return sci_port_configuration_agent_validate_ports(ihost, port_agent); 319 return sci_port_configuration_agent_validate_ports(ihost, port_agent);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index ae69dfcc7834..e3995612ea76 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2812,6 +2812,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2812 kfree(session->boot_nic); 2812 kfree(session->boot_nic);
2813 kfree(session->boot_target); 2813 kfree(session->boot_target);
2814 kfree(session->ifacename); 2814 kfree(session->ifacename);
2815 kfree(session->portal_type);
2816 kfree(session->discovery_parent_type);
2815 2817
2816 iscsi_destroy_session(cls_session); 2818 iscsi_destroy_session(cls_session);
2817 iscsi_host_dec_session_cnt(shost); 2819 iscsi_host_dec_session_cnt(shost);
@@ -3168,6 +3170,7 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3168{ 3170{
3169 struct iscsi_conn *conn = cls_conn->dd_data; 3171 struct iscsi_conn *conn = cls_conn->dd_data;
3170 struct iscsi_session *session = conn->session; 3172 struct iscsi_session *session = conn->session;
3173 int val;
3171 3174
3172 switch(param) { 3175 switch(param) {
3173 case ISCSI_PARAM_FAST_ABORT: 3176 case ISCSI_PARAM_FAST_ABORT:
@@ -3257,6 +3260,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3257 return iscsi_switch_str_param(&session->boot_nic, buf); 3260 return iscsi_switch_str_param(&session->boot_nic, buf);
3258 case ISCSI_PARAM_BOOT_TARGET: 3261 case ISCSI_PARAM_BOOT_TARGET:
3259 return iscsi_switch_str_param(&session->boot_target, buf); 3262 return iscsi_switch_str_param(&session->boot_target, buf);
3263 case ISCSI_PARAM_PORTAL_TYPE:
3264 return iscsi_switch_str_param(&session->portal_type, buf);
3265 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
3266 return iscsi_switch_str_param(&session->discovery_parent_type,
3267 buf);
3268 case ISCSI_PARAM_DISCOVERY_SESS:
3269 sscanf(buf, "%d", &val);
3270 session->discovery_sess = !!val;
3271 break;
3260 default: 3272 default:
3261 return -ENOSYS; 3273 return -ENOSYS;
3262 } 3274 }
@@ -3305,6 +3317,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3305 case ISCSI_PARAM_DATASEQ_INORDER_EN: 3317 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3306 len = sprintf(buf, "%d\n", session->dataseq_inorder_en); 3318 len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
3307 break; 3319 break;
3320 case ISCSI_PARAM_DEF_TASKMGMT_TMO:
3321 len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo);
3322 break;
3308 case ISCSI_PARAM_ERL: 3323 case ISCSI_PARAM_ERL:
3309 len = sprintf(buf, "%d\n", session->erl); 3324 len = sprintf(buf, "%d\n", session->erl);
3310 break; 3325 break;
@@ -3344,6 +3359,52 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3344 case ISCSI_PARAM_BOOT_TARGET: 3359 case ISCSI_PARAM_BOOT_TARGET:
3345 len = sprintf(buf, "%s\n", session->boot_target); 3360 len = sprintf(buf, "%s\n", session->boot_target);
3346 break; 3361 break;
3362 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
3363 len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable);
3364 break;
3365 case ISCSI_PARAM_DISCOVERY_SESS:
3366 len = sprintf(buf, "%u\n", session->discovery_sess);
3367 break;
3368 case ISCSI_PARAM_PORTAL_TYPE:
3369 len = sprintf(buf, "%s\n", session->portal_type);
3370 break;
3371 case ISCSI_PARAM_CHAP_AUTH_EN:
3372 len = sprintf(buf, "%u\n", session->chap_auth_en);
3373 break;
3374 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
3375 len = sprintf(buf, "%u\n", session->discovery_logout_en);
3376 break;
3377 case ISCSI_PARAM_BIDI_CHAP_EN:
3378 len = sprintf(buf, "%u\n", session->bidi_chap_en);
3379 break;
3380 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
3381 len = sprintf(buf, "%u\n", session->discovery_auth_optional);
3382 break;
3383 case ISCSI_PARAM_DEF_TIME2WAIT:
3384 len = sprintf(buf, "%d\n", session->time2wait);
3385 break;
3386 case ISCSI_PARAM_DEF_TIME2RETAIN:
3387 len = sprintf(buf, "%d\n", session->time2retain);
3388 break;
3389 case ISCSI_PARAM_TSID:
3390 len = sprintf(buf, "%u\n", session->tsid);
3391 break;
3392 case ISCSI_PARAM_ISID:
3393 len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
3394 session->isid[0], session->isid[1],
3395 session->isid[2], session->isid[3],
3396 session->isid[4], session->isid[5]);
3397 break;
3398 case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
3399 len = sprintf(buf, "%u\n", session->discovery_parent_idx);
3400 break;
3401 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
3402 if (session->discovery_parent_type)
3403 len = sprintf(buf, "%s\n",
3404 session->discovery_parent_type);
3405 else
3406 len = sprintf(buf, "\n");
3407 break;
3347 default: 3408 default:
3348 return -ENOSYS; 3409 return -ENOSYS;
3349 } 3410 }
@@ -3433,6 +3494,54 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
3433 case ISCSI_PARAM_PERSISTENT_ADDRESS: 3494 case ISCSI_PARAM_PERSISTENT_ADDRESS:
3434 len = sprintf(buf, "%s\n", conn->persistent_address); 3495 len = sprintf(buf, "%s\n", conn->persistent_address);
3435 break; 3496 break;
3497 case ISCSI_PARAM_STATSN:
3498 len = sprintf(buf, "%u\n", conn->statsn);
3499 break;
3500 case ISCSI_PARAM_MAX_SEGMENT_SIZE:
3501 len = sprintf(buf, "%u\n", conn->max_segment_size);
3502 break;
3503 case ISCSI_PARAM_KEEPALIVE_TMO:
3504 len = sprintf(buf, "%u\n", conn->keepalive_tmo);
3505 break;
3506 case ISCSI_PARAM_LOCAL_PORT:
3507 len = sprintf(buf, "%u\n", conn->local_port);
3508 break;
3509 case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
3510 len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat);
3511 break;
3512 case ISCSI_PARAM_TCP_NAGLE_DISABLE:
3513 len = sprintf(buf, "%u\n", conn->tcp_nagle_disable);
3514 break;
3515 case ISCSI_PARAM_TCP_WSF_DISABLE:
3516 len = sprintf(buf, "%u\n", conn->tcp_wsf_disable);
3517 break;
3518 case ISCSI_PARAM_TCP_TIMER_SCALE:
3519 len = sprintf(buf, "%u\n", conn->tcp_timer_scale);
3520 break;
3521 case ISCSI_PARAM_TCP_TIMESTAMP_EN:
3522 len = sprintf(buf, "%u\n", conn->tcp_timestamp_en);
3523 break;
3524 case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
3525 len = sprintf(buf, "%u\n", conn->fragment_disable);
3526 break;
3527 case ISCSI_PARAM_IPV4_TOS:
3528 len = sprintf(buf, "%u\n", conn->ipv4_tos);
3529 break;
3530 case ISCSI_PARAM_IPV6_TC:
3531 len = sprintf(buf, "%u\n", conn->ipv6_traffic_class);
3532 break;
3533 case ISCSI_PARAM_IPV6_FLOW_LABEL:
3534 len = sprintf(buf, "%u\n", conn->ipv6_flow_label);
3535 break;
3536 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
3537 len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6);
3538 break;
3539 case ISCSI_PARAM_TCP_XMIT_WSF:
3540 len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf);
3541 break;
3542 case ISCSI_PARAM_TCP_RECV_WSF:
3543 len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);
3544 break;
3436 default: 3545 default:
3437 return -ENOSYS; 3546 return -ENOSYS;
3438 } 3547 }
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 93f222d66716..df43bfe6d573 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -421,6 +421,7 @@ struct lpfc_vport {
421 uint32_t cfg_enable_da_id; 421 uint32_t cfg_enable_da_id;
422 uint32_t cfg_max_scsicmpl_time; 422 uint32_t cfg_max_scsicmpl_time;
423 uint32_t cfg_tgt_queue_depth; 423 uint32_t cfg_tgt_queue_depth;
424 uint32_t cfg_first_burst_size;
424 425
425 uint32_t dev_loss_tmo_changed; 426 uint32_t dev_loss_tmo_changed;
426 427
@@ -710,8 +711,6 @@ struct lpfc_hba {
710 uint32_t cfg_use_msi; 711 uint32_t cfg_use_msi;
711 uint32_t cfg_fcp_imax; 712 uint32_t cfg_fcp_imax;
712 uint32_t cfg_fcp_cpu_map; 713 uint32_t cfg_fcp_cpu_map;
713 uint32_t cfg_fcp_wq_count;
714 uint32_t cfg_fcp_eq_count;
715 uint32_t cfg_fcp_io_channel; 714 uint32_t cfg_fcp_io_channel;
716 uint32_t cfg_total_seg_cnt; 715 uint32_t cfg_total_seg_cnt;
717 uint32_t cfg_sg_seg_cnt; 716 uint32_t cfg_sg_seg_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5cb08ae3e8c2..22f42f866f75 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -674,9 +674,6 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
674 int i; 674 int i;
675 int rc; 675 int rc;
676 676
677 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
678 return 0;
679
680 init_completion(&online_compl); 677 init_completion(&online_compl);
681 rc = lpfc_workq_post_event(phba, &status, &online_compl, 678 rc = lpfc_workq_post_event(phba, &status, &online_compl,
682 LPFC_EVT_OFFLINE_PREP); 679 LPFC_EVT_OFFLINE_PREP);
@@ -744,14 +741,15 @@ lpfc_selective_reset(struct lpfc_hba *phba)
744 int status = 0; 741 int status = 0;
745 int rc; 742 int rc;
746 743
747 if ((!phba->cfg_enable_hba_reset) || 744 if (!phba->cfg_enable_hba_reset)
748 (phba->pport->fc_flag & FC_OFFLINE_MODE))
749 return -EACCES; 745 return -EACCES;
750 746
751 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 747 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
748 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
752 749
753 if (status != 0) 750 if (status != 0)
754 return status; 751 return status;
752 }
755 753
756 init_completion(&online_compl); 754 init_completion(&online_compl);
757 rc = lpfc_workq_post_event(phba, &status, &online_compl, 755 rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -2591,9 +2589,12 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
2591 2589
2592/* 2590/*
2593# lun_queue_depth: This parameter is used to limit the number of outstanding 2591# lun_queue_depth: This parameter is used to limit the number of outstanding
2594# commands per FCP LUN. Value range is [1,128]. Default value is 30. 2592# commands per FCP LUN. Value range is [1,512]. Default value is 30.
2593# If this parameter value is greater than 1/8th the maximum number of exchanges
2594# supported by the HBA port, then the lun queue depth will be reduced to
2595# 1/8th the maximum number of exchanges.
2595*/ 2596*/
2596LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128, 2597LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
2597 "Max number of FCP commands we can queue to a specific LUN"); 2598 "Max number of FCP commands we can queue to a specific LUN");
2598 2599
2599/* 2600/*
@@ -2601,7 +2602,7 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
2601# commands per target port. Value range is [10,65535]. Default value is 65535. 2602# commands per target port. Value range is [10,65535]. Default value is 65535.
2602*/ 2603*/
2603LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535, 2604LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
2604 "Max number of FCP commands we can queue to a specific target port"); 2605 "Max number of FCP commands we can queue to a specific target port");
2605 2606
2606/* 2607/*
2607# hba_queue_depth: This parameter is used to limit the number of outstanding 2608# hba_queue_depth: This parameter is used to limit the number of outstanding
@@ -3949,6 +3950,14 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
3949 "Use ADISC on rediscovery to authenticate FCP devices"); 3950 "Use ADISC on rediscovery to authenticate FCP devices");
3950 3951
3951/* 3952/*
3953# lpfc_first_burst_size: First burst size to use on the NPorts
3954# that support first burst.
3955# Value range is [0,65536]. Default value is 0.
3956*/
3957LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
3958 "First burst size for Targets that support first burst");
3959
3960/*
3952# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue 3961# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
3953# depth. Default value is 0. When the value of this parameter is zero the 3962# depth. Default value is 0. When the value of this parameter is zero the
3954# SCSI command completion time is not used for controlling I/O queue depth. When 3963# SCSI command completion time is not used for controlling I/O queue depth. When
@@ -4112,25 +4121,6 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
4112 "MSI-X (2), if possible"); 4121 "MSI-X (2), if possible");
4113 4122
4114/* 4123/*
4115# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
4116# This parameter is ignored and will eventually be depricated
4117#
4118# Value range is [1,7]. Default value is 4.
4119*/
4120LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
4121 LPFC_FCP_IO_CHAN_MAX,
4122 "Set the number of fast-path FCP work queues, if possible");
4123
4124/*
4125# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
4126#
4127# Value range is [1,7]. Default value is 4.
4128*/
4129LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
4130 LPFC_FCP_IO_CHAN_MAX,
4131 "Set the number of fast-path FCP event queues, if possible");
4132
4133/*
4134# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels 4124# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
4135# 4125#
4136# Value range is [1,7]. Default value is 4. 4126# Value range is [1,7]. Default value is 4.
@@ -4276,6 +4266,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
4276 &dev_attr_lpfc_devloss_tmo, 4266 &dev_attr_lpfc_devloss_tmo,
4277 &dev_attr_lpfc_fcp_class, 4267 &dev_attr_lpfc_fcp_class,
4278 &dev_attr_lpfc_use_adisc, 4268 &dev_attr_lpfc_use_adisc,
4269 &dev_attr_lpfc_first_burst_size,
4279 &dev_attr_lpfc_ack0, 4270 &dev_attr_lpfc_ack0,
4280 &dev_attr_lpfc_topology, 4271 &dev_attr_lpfc_topology,
4281 &dev_attr_lpfc_scan_down, 4272 &dev_attr_lpfc_scan_down,
@@ -4307,8 +4298,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
4307 &dev_attr_lpfc_use_msi, 4298 &dev_attr_lpfc_use_msi,
4308 &dev_attr_lpfc_fcp_imax, 4299 &dev_attr_lpfc_fcp_imax,
4309 &dev_attr_lpfc_fcp_cpu_map, 4300 &dev_attr_lpfc_fcp_cpu_map,
4310 &dev_attr_lpfc_fcp_wq_count,
4311 &dev_attr_lpfc_fcp_eq_count,
4312 &dev_attr_lpfc_fcp_io_channel, 4301 &dev_attr_lpfc_fcp_io_channel,
4313 &dev_attr_lpfc_enable_bg, 4302 &dev_attr_lpfc_enable_bg,
4314 &dev_attr_lpfc_soft_wwnn, 4303 &dev_attr_lpfc_soft_wwnn,
@@ -4352,6 +4341,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
4352 &dev_attr_lpfc_restrict_login, 4341 &dev_attr_lpfc_restrict_login,
4353 &dev_attr_lpfc_fcp_class, 4342 &dev_attr_lpfc_fcp_class,
4354 &dev_attr_lpfc_use_adisc, 4343 &dev_attr_lpfc_use_adisc,
4344 &dev_attr_lpfc_first_burst_size,
4355 &dev_attr_lpfc_fdmi_on, 4345 &dev_attr_lpfc_fdmi_on,
4356 &dev_attr_lpfc_max_luns, 4346 &dev_attr_lpfc_max_luns,
4357 &dev_attr_nport_evt_cnt, 4347 &dev_attr_nport_evt_cnt,
@@ -5290,8 +5280,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
5290 lpfc_use_msi_init(phba, lpfc_use_msi); 5280 lpfc_use_msi_init(phba, lpfc_use_msi);
5291 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 5281 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
5292 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); 5282 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
5293 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
5294 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
5295 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); 5283 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
5296 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 5284 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
5297 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 5285 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
@@ -5331,6 +5319,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
5331 lpfc_restrict_login_init(vport, lpfc_restrict_login); 5319 lpfc_restrict_login_init(vport, lpfc_restrict_login);
5332 lpfc_fcp_class_init(vport, lpfc_fcp_class); 5320 lpfc_fcp_class_init(vport, lpfc_fcp_class);
5333 lpfc_use_adisc_init(vport, lpfc_use_adisc); 5321 lpfc_use_adisc_init(vport, lpfc_use_adisc);
5322 lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
5334 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time); 5323 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
5335 lpfc_fdmi_on_init(vport, lpfc_fdmi_on); 5324 lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
5336 lpfc_discovery_threads_init(vport, lpfc_discovery_threads); 5325 lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 6630520d295c..bc270639c1c3 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2498,7 +2498,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2498 struct lpfc_sli_ct_request *ctreq = NULL; 2498 struct lpfc_sli_ct_request *ctreq = NULL;
2499 int ret_val = 0; 2499 int ret_val = 0;
2500 int time_left; 2500 int time_left;
2501 int iocb_stat = 0; 2501 int iocb_stat = IOCB_SUCCESS;
2502 unsigned long flags; 2502 unsigned long flags;
2503 2503
2504 *txxri = 0; 2504 *txxri = 0;
@@ -2574,6 +2574,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2574 2574
2575 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 2575 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2576 cmdiocbq->vport = phba->pport; 2576 cmdiocbq->vport = phba->pport;
2577 cmdiocbq->iocb_cmpl = NULL;
2577 2578
2578 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 2579 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2579 rspiocbq, 2580 rspiocbq,
@@ -2963,7 +2964,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2963 uint8_t *ptr = NULL, *rx_databuf = NULL; 2964 uint8_t *ptr = NULL, *rx_databuf = NULL;
2964 int rc = 0; 2965 int rc = 0;
2965 int time_left; 2966 int time_left;
2966 int iocb_stat; 2967 int iocb_stat = IOCB_SUCCESS;
2967 unsigned long flags; 2968 unsigned long flags;
2968 void *dataout = NULL; 2969 void *dataout = NULL;
2969 uint32_t total_mem; 2970 uint32_t total_mem;
@@ -3149,6 +3150,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
3149 } 3150 }
3150 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 3151 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3151 cmdiocbq->vport = phba->pport; 3152 cmdiocbq->vport = phba->pport;
3153 cmdiocbq->iocb_cmpl = NULL;
3152 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 3154 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3153 rspiocbq, (phba->fc_ratov * 2) + 3155 rspiocbq, (phba->fc_ratov * 2) +
3154 LPFC_DRVR_TIMEOUT); 3156 LPFC_DRVR_TIMEOUT);
@@ -3209,7 +3211,7 @@ err_loopback_test_exit:
3209 lpfc_bsg_event_unref(evt); /* delete */ 3211 lpfc_bsg_event_unref(evt); /* delete */
3210 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3212 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3211 3213
3212 if (cmdiocbq != NULL) 3214 if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3213 lpfc_sli_release_iocbq(phba, cmdiocbq); 3215 lpfc_sli_release_iocbq(phba, cmdiocbq);
3214 3216
3215 if (rspiocbq != NULL) 3217 if (rspiocbq != NULL)
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 68391177432b..02e8cd923d0a 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -895,7 +895,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
895 895
896 if (irsp->ulpStatus) { 896 if (irsp->ulpStatus) {
897 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 897 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
898 "0268 NS cmd %x Error (%d %d)\n", 898 "0268 NS cmd x%x Error (x%x x%x)\n",
899 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]); 899 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
900 900
901 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 901 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index af49fb03dbb8..e409ba5f728c 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2013 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -154,6 +154,7 @@ struct lpfc_node_rrq {
154#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ 154#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
155#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 155#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
156#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ 156#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
157#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
157#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ 158#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
158 159
159/* ndlp usage management macros */ 160/* ndlp usage management macros */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6b8ee7449f16..110445f0c58d 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2122,6 +2122,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2122 } 2122 }
2123 npr->estabImagePair = 1; 2123 npr->estabImagePair = 1;
2124 npr->readXferRdyDis = 1; 2124 npr->readXferRdyDis = 1;
2125 if (vport->cfg_first_burst_size)
2126 npr->writeXferRdyDis = 1;
2125 2127
2126 /* For FCP support */ 2128 /* For FCP support */
2127 npr->prliType = PRLI_FCP_TYPE; 2129 npr->prliType = PRLI_FCP_TYPE;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4ec3d7c044c2..086c3f28caa6 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -234,6 +234,9 @@ struct ulp_bde64 {
234 uint32_t addrHigh; 234 uint32_t addrHigh;
235}; 235};
236 236
237/* Maximun size of immediate data that can fit into a 128 byte WQE */
238#define LPFC_MAX_BDE_IMM_SIZE 64
239
237struct lpfc_sli4_flags { 240struct lpfc_sli4_flags {
238 uint32_t word0; 241 uint32_t word0;
239#define lpfc_idx_rsrc_rdy_SHIFT 0 242#define lpfc_idx_rsrc_rdy_SHIFT 0
@@ -2585,6 +2588,9 @@ struct lpfc_sli4_parameters {
2585#define cfg_mqv_WORD word6 2588#define cfg_mqv_WORD word6
2586 uint32_t word7; 2589 uint32_t word7;
2587 uint32_t word8; 2590 uint32_t word8;
2591#define cfg_wqsize_SHIFT 8
2592#define cfg_wqsize_MASK 0x0000000f
2593#define cfg_wqsize_WORD word8
2588#define cfg_wqv_SHIFT 14 2594#define cfg_wqv_SHIFT 14
2589#define cfg_wqv_MASK 0x00000003 2595#define cfg_wqv_MASK 0x00000003
2590#define cfg_wqv_WORD word8 2596#define cfg_wqv_WORD word8
@@ -3622,6 +3628,13 @@ union lpfc_wqe {
3622 struct gen_req64_wqe gen_req; 3628 struct gen_req64_wqe gen_req;
3623}; 3629};
3624 3630
3631union lpfc_wqe128 {
3632 uint32_t words[32];
3633 struct lpfc_wqe_generic generic;
3634 struct xmit_seq64_wqe xmit_sequence;
3635 struct gen_req64_wqe gen_req;
3636};
3637
3625#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001 3638#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
3626#define LPFC_FILE_TYPE_GROUP 0xf7 3639#define LPFC_FILE_TYPE_GROUP 0xf7
3627#define LPFC_FILE_ID_GROUP 0xa2 3640#define LPFC_FILE_ID_GROUP 0xa2
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e0b20fad8502..501147c4a147 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -472,10 +472,22 @@ lpfc_config_port_post(struct lpfc_hba *phba)
472 lpfc_sli_read_link_ste(phba); 472 lpfc_sli_read_link_ste(phba);
473 473
474 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 474 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
475 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 475 i = (mb->un.varRdConfig.max_xri + 1);
476 phba->cfg_hba_queue_depth = 476 if (phba->cfg_hba_queue_depth > i) {
477 (mb->un.varRdConfig.max_xri + 1) - 477 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
478 lpfc_sli4_get_els_iocb_cnt(phba); 478 "3359 HBA queue depth changed from %d to %d\n",
479 phba->cfg_hba_queue_depth, i);
480 phba->cfg_hba_queue_depth = i;
481 }
482
483 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
484 i = (mb->un.varRdConfig.max_xri >> 3);
485 if (phba->pport->cfg_lun_queue_depth > i) {
486 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
487 "3360 LUN queue depth changed from %d to %d\n",
488 phba->pport->cfg_lun_queue_depth, i);
489 phba->pport->cfg_lun_queue_depth = i;
490 }
479 491
480 phba->lmt = mb->un.varRdConfig.lmt; 492 phba->lmt = mb->un.varRdConfig.lmt;
481 493
@@ -4901,9 +4913,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4901 lpfc_get_cfgparam(phba); 4913 lpfc_get_cfgparam(phba);
4902 phba->max_vpi = LPFC_MAX_VPI; 4914 phba->max_vpi = LPFC_MAX_VPI;
4903 4915
4904 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
4905 phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
4906
4907 /* This will be set to correct value after the read_config mbox */ 4916 /* This will be set to correct value after the read_config mbox */
4908 phba->max_vports = 0; 4917 phba->max_vports = 0;
4909 4918
@@ -6664,12 +6673,14 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6664 goto read_cfg_out; 6673 goto read_cfg_out;
6665 6674
6666 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 6675 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
6667 if (phba->cfg_hba_queue_depth > 6676 length = phba->sli4_hba.max_cfg_param.max_xri -
6668 (phba->sli4_hba.max_cfg_param.max_xri - 6677 lpfc_sli4_get_els_iocb_cnt(phba);
6669 lpfc_sli4_get_els_iocb_cnt(phba))) 6678 if (phba->cfg_hba_queue_depth > length) {
6670 phba->cfg_hba_queue_depth = 6679 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6671 phba->sli4_hba.max_cfg_param.max_xri - 6680 "3361 HBA queue depth changed from %d to %d\n",
6672 lpfc_sli4_get_els_iocb_cnt(phba); 6681 phba->cfg_hba_queue_depth, length);
6682 phba->cfg_hba_queue_depth = length;
6683 }
6673 6684
6674 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 6685 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6675 LPFC_SLI_INTF_IF_TYPE_2) 6686 LPFC_SLI_INTF_IF_TYPE_2)
@@ -6859,11 +6870,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6859 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; 6870 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
6860 } 6871 }
6861 6872
6862 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
6863
6864 /* The actual number of FCP event queues adopted */ 6873 /* The actual number of FCP event queues adopted */
6865 phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
6866 phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
6867 phba->cfg_fcp_io_channel = cfg_fcp_io_channel; 6874 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6868 6875
6869 /* Get EQ depth from module parameter, fake the default for now */ 6876 /* Get EQ depth from module parameter, fake the default for now */
@@ -9154,6 +9161,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9154 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 9161 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
9155 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 9162 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
9156 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 9163 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
9164 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
9157 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 9165 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
9158 mbx_sli4_parameters); 9166 mbx_sli4_parameters);
9159 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 9167 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index b1c510f6b8f0..1f292e29d566 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -178,7 +178,8 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
178 mb->mbxOwner = OWN_HOST; 178 mb->mbxOwner = OWN_HOST;
179 mb->un.varDmp.cv = 1; 179 mb->un.varDmp.cv = 1;
180 mb->un.varDmp.type = DMP_NV_PARAMS; 180 mb->un.varDmp.type = DMP_NV_PARAMS;
181 mb->un.varDmp.entry_index = 0; 181 if (phba->sli_rev < LPFC_SLI_REV4)
182 mb->un.varDmp.entry_index = 0;
182 mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID; 183 mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
183 mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE; 184 mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
184 mb->un.varDmp.co = 0; 185 mb->un.varDmp.co = 0;
@@ -361,7 +362,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
361 /* NEW_FEATURE 362 /* NEW_FEATURE
362 * SLI-2, Coalescing Response Feature. 363 * SLI-2, Coalescing Response Feature.
363 */ 364 */
364 if (phba->cfg_cr_delay) { 365 if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
365 mb->un.varCfgLnk.cr = 1; 366 mb->un.varCfgLnk.cr = 1;
366 mb->un.varCfgLnk.ci = 1; 367 mb->un.varCfgLnk.ci = 1;
367 mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay; 368 mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
@@ -377,7 +378,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
377 mb->un.varCfgLnk.crtov = phba->fc_crtov; 378 mb->un.varCfgLnk.crtov = phba->fc_crtov;
378 mb->un.varCfgLnk.citov = phba->fc_citov; 379 mb->un.varCfgLnk.citov = phba->fc_citov;
379 380
380 if (phba->cfg_ack0) 381 if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
381 mb->un.varCfgLnk.ack0_enable = 1; 382 mb->un.varCfgLnk.ack0_enable = 1;
382 383
383 mb->mbxCommand = MBX_CONFIG_LINK; 384 mb->mbxCommand = MBX_CONFIG_LINK;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 6aaf39a1f1c5..abc361259d6d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -690,11 +690,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
690 690
691 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 691 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
692 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 692 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
693 ndlp->nlp_flag &= ~NLP_FIRSTBURST;
693 if (npr->prliType == PRLI_FCP_TYPE) { 694 if (npr->prliType == PRLI_FCP_TYPE) {
694 if (npr->initiatorFunc) 695 if (npr->initiatorFunc)
695 ndlp->nlp_type |= NLP_FCP_INITIATOR; 696 ndlp->nlp_type |= NLP_FCP_INITIATOR;
696 if (npr->targetFunc) 697 if (npr->targetFunc) {
697 ndlp->nlp_type |= NLP_FCP_TARGET; 698 ndlp->nlp_type |= NLP_FCP_TARGET;
699 if (npr->writeXferRdyDis)
700 ndlp->nlp_flag |= NLP_FIRSTBURST;
701 }
698 if (npr->Retry) 702 if (npr->Retry)
699 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; 703 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
700 } 704 }
@@ -1676,12 +1680,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1676 /* Check out PRLI rsp */ 1680 /* Check out PRLI rsp */
1677 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 1681 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1678 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 1682 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1683 ndlp->nlp_flag &= ~NLP_FIRSTBURST;
1679 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && 1684 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1680 (npr->prliType == PRLI_FCP_TYPE)) { 1685 (npr->prliType == PRLI_FCP_TYPE)) {
1681 if (npr->initiatorFunc) 1686 if (npr->initiatorFunc)
1682 ndlp->nlp_type |= NLP_FCP_INITIATOR; 1687 ndlp->nlp_type |= NLP_FCP_INITIATOR;
1683 if (npr->targetFunc) 1688 if (npr->targetFunc) {
1684 ndlp->nlp_type |= NLP_FCP_TARGET; 1689 ndlp->nlp_type |= NLP_FCP_TARGET;
1690 if (npr->writeXferRdyDis)
1691 ndlp->nlp_flag |= NLP_FIRSTBURST;
1692 }
1685 if (npr->Retry) 1693 if (npr->Retry)
1686 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; 1694 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1687 } 1695 }
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 243de1d324b7..1242b6c4308b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4386,11 +4386,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4386 if (scsi_sg_count(scsi_cmnd)) { 4386 if (scsi_sg_count(scsi_cmnd)) {
4387 if (datadir == DMA_TO_DEVICE) { 4387 if (datadir == DMA_TO_DEVICE) {
4388 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 4388 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4389 if (sli4) 4389 iocb_cmd->ulpPU = PARM_READ_CHECK;
4390 iocb_cmd->ulpPU = PARM_READ_CHECK; 4390 if (vport->cfg_first_burst_size &&
4391 else { 4391 (pnode->nlp_flag & NLP_FIRSTBURST)) {
4392 iocb_cmd->un.fcpi.fcpi_parm = 0; 4392 piocbq->iocb.un.fcpi.fcpi_XRdy =
4393 iocb_cmd->ulpPU = 0; 4393 vport->cfg_first_burst_size;
4394 } 4394 }
4395 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4395 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4396 phba->fc4OutputRequests++; 4396 phba->fc4OutputRequests++;
@@ -5022,6 +5022,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
5022 lpfc_release_scsi_buf(phba, lpfc_cmd); 5022 lpfc_release_scsi_buf(phba, lpfc_cmd);
5023 return FAILED; 5023 return FAILED;
5024 } 5024 }
5025 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5025 5026
5026 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5027 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5027 "0702 Issue %s to TGT %d LUN %d " 5028 "0702 Issue %s to TGT %d LUN %d "
@@ -5034,7 +5035,6 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
5034 iocbq, iocbqrsp, lpfc_cmd->timeout); 5035 iocbq, iocbqrsp, lpfc_cmd->timeout);
5035 if (status != IOCB_SUCCESS) { 5036 if (status != IOCB_SUCCESS) {
5036 if (status == IOCB_TIMEDOUT) { 5037 if (status == IOCB_TIMEDOUT) {
5037 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5038 ret = TIMEOUT_ERROR; 5038 ret = TIMEOUT_ERROR;
5039 } else 5039 } else
5040 ret = FAILED; 5040 ret = FAILED;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 43440ca16f46..0392e114531c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -6163,6 +6163,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6163 kfree(vpd); 6163 kfree(vpd);
6164 goto out_free_mbox; 6164 goto out_free_mbox;
6165 } 6165 }
6166
6166 mqe = &mboxq->u.mqe; 6167 mqe = &mboxq->u.mqe;
6167 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 6168 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6168 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 6169 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
@@ -6249,6 +6250,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6249 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 6250 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6250 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 6251 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6251 6252
6253 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
6254 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6255 if (phba->pport->cfg_lun_queue_depth > rc) {
6256 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6257 "3362 LUN queue depth changed from %d to %d\n",
6258 phba->pport->cfg_lun_queue_depth, rc);
6259 phba->pport->cfg_lun_queue_depth = rc;
6260 }
6261
6262
6252 /* 6263 /*
6253 * Discover the port's supported feature set and match it against the 6264 * Discover the port's supported feature set and match it against the
6254 * hosts requests. 6265 * hosts requests.
@@ -9889,6 +9900,24 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
9889 struct lpfc_scsi_buf *lpfc_cmd; 9900 struct lpfc_scsi_buf *lpfc_cmd;
9890 9901
9891 spin_lock_irqsave(&phba->hbalock, iflags); 9902 spin_lock_irqsave(&phba->hbalock, iflags);
9903 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
9904
9905 /*
9906 * A time out has occurred for the iocb. If a time out
9907 * completion handler has been supplied, call it. Otherwise,
9908 * just free the iocbq.
9909 */
9910
9911 spin_unlock_irqrestore(&phba->hbalock, iflags);
9912 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
9913 cmdiocbq->wait_iocb_cmpl = NULL;
9914 if (cmdiocbq->iocb_cmpl)
9915 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
9916 else
9917 lpfc_sli_release_iocbq(phba, cmdiocbq);
9918 return;
9919 }
9920
9892 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 9921 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
9893 if (cmdiocbq->context2 && rspiocbq) 9922 if (cmdiocbq->context2 && rspiocbq)
9894 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 9923 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
@@ -9944,10 +9973,16 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
9944 * @timeout: Timeout in number of seconds. 9973 * @timeout: Timeout in number of seconds.
9945 * 9974 *
9946 * This function issues the iocb to firmware and waits for the 9975 * This function issues the iocb to firmware and waits for the
9947 * iocb to complete. If the iocb command is not 9976 * iocb to complete. The iocb_cmpl field of the shall be used
9948 * completed within timeout seconds, it returns IOCB_TIMEDOUT. 9977 * to handle iocbs which time out. If the field is NULL, the
9949 * Caller should not free the iocb resources if this function 9978 * function shall free the iocbq structure. If more clean up is
9950 * returns IOCB_TIMEDOUT. 9979 * needed, the caller is expected to provide a completion function
9980 * that will provide the needed clean up. If the iocb command is
9981 * not completed within timeout seconds, the function will either
9982 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
9983 * completion function set in the iocb_cmpl field and then return
9984 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
9985 * resources if this function returns IOCB_TIMEDOUT.
9951 * The function waits for the iocb completion using an 9986 * The function waits for the iocb completion using an
9952 * non-interruptible wait. 9987 * non-interruptible wait.
9953 * This function will sleep while waiting for iocb completion. 9988 * This function will sleep while waiting for iocb completion.
@@ -9980,6 +10015,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
9980 int txq_cnt = 0; 10015 int txq_cnt = 0;
9981 int txcmplq_cnt = 0; 10016 int txcmplq_cnt = 0;
9982 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 10017 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
10018 unsigned long iflags;
10019 bool iocb_completed = true;
10020
9983 /* 10021 /*
9984 * If the caller has provided a response iocbq buffer, then context2 10022 * If the caller has provided a response iocbq buffer, then context2
9985 * is NULL or its an error. 10023 * is NULL or its an error.
@@ -9990,9 +10028,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
9990 piocb->context2 = prspiocbq; 10028 piocb->context2 = prspiocbq;
9991 } 10029 }
9992 10030
10031 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
9993 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 10032 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
9994 piocb->context_un.wait_queue = &done_q; 10033 piocb->context_un.wait_queue = &done_q;
9995 piocb->iocb_flag &= ~LPFC_IO_WAKE; 10034 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
9996 10035
9997 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 10036 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9998 if (lpfc_readl(phba->HCregaddr, &creg_val)) 10037 if (lpfc_readl(phba->HCregaddr, &creg_val))
@@ -10009,8 +10048,19 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
10009 timeleft = wait_event_timeout(done_q, 10048 timeleft = wait_event_timeout(done_q,
10010 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 10049 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
10011 timeout_req); 10050 timeout_req);
10051 spin_lock_irqsave(&phba->hbalock, iflags);
10052 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
10012 10053
10013 if (piocb->iocb_flag & LPFC_IO_WAKE) { 10054 /*
10055 * IOCB timed out. Inform the wake iocb wait
10056 * completion function and set local status
10057 */
10058
10059 iocb_completed = false;
10060 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
10061 }
10062 spin_unlock_irqrestore(&phba->hbalock, iflags);
10063 if (iocb_completed) {
10014 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10064 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10015 "0331 IOCB wake signaled\n"); 10065 "0331 IOCB wake signaled\n");
10016 } else if (timeleft == 0) { 10066 } else if (timeleft == 0) {
@@ -10122,7 +10172,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
10122 */ 10172 */
10123 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 10173 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
10124 retval = MBX_SUCCESS; 10174 retval = MBX_SUCCESS;
10125 lpfc_sli4_swap_str(phba, pmboxq);
10126 } else { 10175 } else {
10127 retval = MBX_TIMEOUT; 10176 retval = MBX_TIMEOUT;
10128 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10177 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -12820,10 +12869,44 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12820 wq->page_count); 12869 wq->page_count);
12821 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 12870 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
12822 cq->queue_id); 12871 cq->queue_id);
12872
12873 /* wqv is the earliest version supported, NOT the latest */
12823 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12874 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12824 phba->sli4_hba.pc_sli4_params.wqv); 12875 phba->sli4_hba.pc_sli4_params.wqv);
12825 12876
12826 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { 12877 switch (phba->sli4_hba.pc_sli4_params.wqv) {
12878 case LPFC_Q_CREATE_VERSION_0:
12879 switch (wq->entry_size) {
12880 default:
12881 case 64:
12882 /* Nothing to do, version 0 ONLY supports 64 byte */
12883 page = wq_create->u.request.page;
12884 break;
12885 case 128:
12886 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
12887 LPFC_WQ_SZ128_SUPPORT)) {
12888 status = -ERANGE;
12889 goto out;
12890 }
12891 /* If we get here the HBA MUST also support V1 and
12892 * we MUST use it
12893 */
12894 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12895 LPFC_Q_CREATE_VERSION_1);
12896
12897 bf_set(lpfc_mbx_wq_create_wqe_count,
12898 &wq_create->u.request_1, wq->entry_count);
12899 bf_set(lpfc_mbx_wq_create_wqe_size,
12900 &wq_create->u.request_1,
12901 LPFC_WQ_WQE_SIZE_128);
12902 bf_set(lpfc_mbx_wq_create_page_size,
12903 &wq_create->u.request_1,
12904 (PAGE_SIZE/SLI4_PAGE_SIZE));
12905 page = wq_create->u.request_1.page;
12906 break;
12907 }
12908 break;
12909 case LPFC_Q_CREATE_VERSION_1:
12827 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 12910 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
12828 wq->entry_count); 12911 wq->entry_count);
12829 switch (wq->entry_size) { 12912 switch (wq->entry_size) {
@@ -12834,6 +12917,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12834 LPFC_WQ_WQE_SIZE_64); 12917 LPFC_WQ_WQE_SIZE_64);
12835 break; 12918 break;
12836 case 128: 12919 case 128:
12920 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
12921 LPFC_WQ_SZ128_SUPPORT)) {
12922 status = -ERANGE;
12923 goto out;
12924 }
12837 bf_set(lpfc_mbx_wq_create_wqe_size, 12925 bf_set(lpfc_mbx_wq_create_wqe_size,
12838 &wq_create->u.request_1, 12926 &wq_create->u.request_1,
12839 LPFC_WQ_WQE_SIZE_128); 12927 LPFC_WQ_WQE_SIZE_128);
@@ -12842,9 +12930,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12842 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, 12930 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
12843 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12931 (PAGE_SIZE/SLI4_PAGE_SIZE));
12844 page = wq_create->u.request_1.page; 12932 page = wq_create->u.request_1.page;
12845 } else { 12933 break;
12846 page = wq_create->u.request.page; 12934 default:
12935 status = -ERANGE;
12936 goto out;
12847 } 12937 }
12938
12848 list_for_each_entry(dmabuf, &wq->page_list, list) { 12939 list_for_each_entry(dmabuf, &wq->page_list, list) {
12849 memset(dmabuf->virt, 0, hw_page_size); 12940 memset(dmabuf->virt, 0, hw_page_size);
12850 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 12941 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
@@ -14665,14 +14756,20 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14665 first_iocbq->iocb.unsli3.rcvsli3.vpi = 14756 first_iocbq->iocb.unsli3.rcvsli3.vpi =
14666 vport->phba->vpi_ids[vport->vpi]; 14757 vport->phba->vpi_ids[vport->vpi];
14667 /* put the first buffer into the first IOCBq */ 14758 /* put the first buffer into the first IOCBq */
14759 tot_len = bf_get(lpfc_rcqe_length,
14760 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
14761
14668 first_iocbq->context2 = &seq_dmabuf->dbuf; 14762 first_iocbq->context2 = &seq_dmabuf->dbuf;
14669 first_iocbq->context3 = NULL; 14763 first_iocbq->context3 = NULL;
14670 first_iocbq->iocb.ulpBdeCount = 1; 14764 first_iocbq->iocb.ulpBdeCount = 1;
14671 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 14765 if (tot_len > LPFC_DATA_BUF_SIZE)
14766 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14672 LPFC_DATA_BUF_SIZE; 14767 LPFC_DATA_BUF_SIZE;
14768 else
14769 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
14770
14673 first_iocbq->iocb.un.rcvels.remoteID = sid; 14771 first_iocbq->iocb.un.rcvels.remoteID = sid;
14674 tot_len = bf_get(lpfc_rcqe_length, 14772
14675 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
14676 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 14773 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14677 } 14774 }
14678 iocbq = first_iocbq; 14775 iocbq = first_iocbq;
@@ -14688,14 +14785,17 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14688 if (!iocbq->context3) { 14785 if (!iocbq->context3) {
14689 iocbq->context3 = d_buf; 14786 iocbq->context3 = d_buf;
14690 iocbq->iocb.ulpBdeCount++; 14787 iocbq->iocb.ulpBdeCount++;
14691 pbde = (struct ulp_bde64 *)
14692 &iocbq->iocb.unsli3.sli3Words[4];
14693 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
14694
14695 /* We need to get the size out of the right CQE */ 14788 /* We need to get the size out of the right CQE */
14696 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14789 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14697 len = bf_get(lpfc_rcqe_length, 14790 len = bf_get(lpfc_rcqe_length,
14698 &hbq_buf->cq_event.cqe.rcqe_cmpl); 14791 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14792 pbde = (struct ulp_bde64 *)
14793 &iocbq->iocb.unsli3.sli3Words[4];
14794 if (len > LPFC_DATA_BUF_SIZE)
14795 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
14796 else
14797 pbde->tus.f.bdeSize = len;
14798
14699 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 14799 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
14700 tot_len += len; 14800 tot_len += len;
14701 } else { 14801 } else {
@@ -14710,16 +14810,19 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14710 lpfc_in_buf_free(vport->phba, d_buf); 14810 lpfc_in_buf_free(vport->phba, d_buf);
14711 continue; 14811 continue;
14712 } 14812 }
14813 /* We need to get the size out of the right CQE */
14814 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14815 len = bf_get(lpfc_rcqe_length,
14816 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14713 iocbq->context2 = d_buf; 14817 iocbq->context2 = d_buf;
14714 iocbq->context3 = NULL; 14818 iocbq->context3 = NULL;
14715 iocbq->iocb.ulpBdeCount = 1; 14819 iocbq->iocb.ulpBdeCount = 1;
14716 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 14820 if (len > LPFC_DATA_BUF_SIZE)
14821 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14717 LPFC_DATA_BUF_SIZE; 14822 LPFC_DATA_BUF_SIZE;
14823 else
14824 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
14718 14825
14719 /* We need to get the size out of the right CQE */
14720 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14721 len = bf_get(lpfc_rcqe_length,
14722 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14723 tot_len += len; 14826 tot_len += len;
14724 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 14827 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14725 14828
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 9d2e0c6fe334..97617996206d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2013 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -60,7 +60,8 @@ struct lpfc_iocbq {
60 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 60 uint8_t retry; /* retry counter for IOCB cmd - if needed */
61 uint16_t iocb_flag; 61 uint16_t iocb_flag;
62#define LPFC_IO_LIBDFC 1 /* libdfc iocb */ 62#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
63#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ 63#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
64#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
64#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ 65#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
65#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 66#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
66#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ 67#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
@@ -93,6 +94,8 @@ struct lpfc_iocbq {
93 94
94 void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 95 void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
95 struct lpfc_iocbq *); 96 struct lpfc_iocbq *);
97 void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
98 struct lpfc_iocbq *);
96 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 99 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
97 struct lpfc_iocbq *); 100 struct lpfc_iocbq *);
98}; 101};
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d710b87a4417..5bcc38223ac9 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -117,6 +117,7 @@ union sli4_qe {
117 struct lpfc_rcqe_complete *rcqe_complete; 117 struct lpfc_rcqe_complete *rcqe_complete;
118 struct lpfc_mqe *mqe; 118 struct lpfc_mqe *mqe;
119 union lpfc_wqe *wqe; 119 union lpfc_wqe *wqe;
120 union lpfc_wqe128 *wqe128;
120 struct lpfc_rqe *rqe; 121 struct lpfc_rqe *rqe;
121}; 122};
122 123
@@ -325,12 +326,14 @@ struct lpfc_bmbx {
325#define LPFC_EQE_SIZE_16B 16 326#define LPFC_EQE_SIZE_16B 16
326#define LPFC_CQE_SIZE 16 327#define LPFC_CQE_SIZE 16
327#define LPFC_WQE_SIZE 64 328#define LPFC_WQE_SIZE 64
329#define LPFC_WQE128_SIZE 128
328#define LPFC_MQE_SIZE 256 330#define LPFC_MQE_SIZE 256
329#define LPFC_RQE_SIZE 8 331#define LPFC_RQE_SIZE 8
330 332
331#define LPFC_EQE_DEF_COUNT 1024 333#define LPFC_EQE_DEF_COUNT 1024
332#define LPFC_CQE_DEF_COUNT 1024 334#define LPFC_CQE_DEF_COUNT 1024
333#define LPFC_WQE_DEF_COUNT 256 335#define LPFC_WQE_DEF_COUNT 256
336#define LPFC_WQE128_DEF_COUNT 128
334#define LPFC_MQE_DEF_COUNT 16 337#define LPFC_MQE_DEF_COUNT 16
335#define LPFC_RQE_DEF_COUNT 512 338#define LPFC_RQE_DEF_COUNT 512
336 339
@@ -416,6 +419,9 @@ struct lpfc_pc_sli4_params {
416 uint8_t mqv; 419 uint8_t mqv;
417 uint8_t wqv; 420 uint8_t wqv;
418 uint8_t rqv; 421 uint8_t rqv;
422 uint8_t wqsize;
423#define LPFC_WQ_SZ64_SUPPORT 1
424#define LPFC_WQ_SZ128_SUPPORT 2
419}; 425};
420 426
421struct lpfc_iov { 427struct lpfc_iov {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c6c32eebf3dd..21859d2006ce 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.40" 21#define LPFC_DRIVER_VERSION "8.3.41"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e28e431564b0..a87ee33f4f2a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2013 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -387,6 +387,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
387 /* Create binary sysfs attribute for vport */ 387 /* Create binary sysfs attribute for vport */
388 lpfc_alloc_sysfs_attr(vport); 388 lpfc_alloc_sysfs_attr(vport);
389 389
390 /* Set the DFT_LUN_Q_DEPTH accordingly */
391 vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;
392
390 *(struct lpfc_vport **)fc_vport->dd_data = vport; 393 *(struct lpfc_vport **)fc_vport->dd_data = vport;
391 vport->fc_vport = fc_vport; 394 vport->fc_vport = fc_vport;
392 395
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 31b5b15a4726..7b14a015c903 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2012 LSI Corporation. 2 * Copyright (c) 2000-2013 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2.h 5 * Name: mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.27 11 * mpi2.h Version: 02.00.28
12 * 12 *
13 * Version History 13 * Version History
14 * --------------- 14 * ---------------
@@ -77,6 +77,7 @@
77 * Added Hard Reset delay timings. 77 * Added Hard Reset delay timings.
78 * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. 78 * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
79 * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. 79 * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT.
80 * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
80 * -------------------------------------------------------------------------- 81 * --------------------------------------------------------------------------
81 */ 82 */
82 83
@@ -102,7 +103,7 @@
102#define MPI2_VERSION_02_00 (0x0200) 103#define MPI2_VERSION_02_00 (0x0200)
103 104
104/* versioning for this MPI header set */ 105/* versioning for this MPI header set */
105#define MPI2_HEADER_VERSION_UNIT (0x1B) 106#define MPI2_HEADER_VERSION_UNIT (0x1C)
106#define MPI2_HEADER_VERSION_DEV (0x00) 107#define MPI2_HEADER_VERSION_DEV (0x00)
107#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 108#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
108#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 109#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 737fa8cfb54a..88cb7f828bbd 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2011 LSI Corporation. 2 * Copyright (c) 2000-2013 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_cnfg.h 5 * Name: mpi2_cnfg.h
6 * Title: MPI Configuration messages and pages 6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006 7 * Creation Date: November 10, 2006
8 * 8 *
9 * mpi2_cnfg.h Version: 02.00.22 9 * mpi2_cnfg.h Version: 02.00.23
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -149,6 +149,8 @@
149 * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. 149 * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
150 * Added UEFIVersion field to BIOS Page 1 and defined new 150 * Added UEFIVersion field to BIOS Page 1 and defined new
151 * BiosOptions bits. 151 * BiosOptions bits.
152 * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
153 * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
152 * -------------------------------------------------------------------------- 154 * --------------------------------------------------------------------------
153 */ 155 */
154 156
@@ -698,6 +700,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7
698#define MPI2_MANUFACTURING7_PAGEVERSION (0x01) 700#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
699 701
700/* defines for the Flags field */ 702/* defines for the Flags field */
703#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
701#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) 704#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
702 705
703 706
@@ -1224,6 +1227,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1
1224#define MPI2_BIOSPAGE1_PAGEVERSION (0x05) 1227#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
1225 1228
1226/* values for BIOS Page 1 BiosOptions field */ 1229/* values for BIOS Page 1 BiosOptions field */
1230#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0)
1231#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000)
1232
1227#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) 1233#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
1228#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) 1234#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
1229#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) 1235#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 963761fb8462..9d284dae6553 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2012 LSI Corporation. 2 * Copyright (c) 2000-2013 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_init.h 5 * Name: mpi2_init.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index e93f8f53adf9..d159c5f24aab 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2012 LSI Corporation. 2 * Copyright (c) 2000-2013 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_ioc.h 5 * Name: mpi2_ioc.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 255b0ca219a4..0d202a2c6db7 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2012 LSI Corporation. 2 * Copyright (c) 2000-2013 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_raid.h 5 * Name: mpi2_raid.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index fdffde1ebc0f..50b39ccd526a 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2010 LSI Corporation. 2 * Copyright (c) 2000-2013 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_sas.h 5 * Name: mpi2_sas.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 67c387f10e59..11b2ac4e7c6e 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2012 LSI Corporation. 2 * Copyright (c) 2000-2013 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_tool.h 5 * Name: mpi2_tool.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
index cfde017bf16e..0b128b68a5ea 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2007 LSI Corporation. 2 * Copyright (c) 2000-2013 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_type.h 5 * Name: mpi2_type.h
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index ccd6d5a97ec3..3901edc35812 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
3 * for access to MPT (Message Passing Technology) firmware. 3 * for access to MPT (Message Passing Technology) firmware.
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c 5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2012 LSI Corporation 6 * Copyright (C) 2007-2013 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -768,10 +768,9 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
768 * @msix_index: MSIX table index supplied by the OS 768 * @msix_index: MSIX table index supplied by the OS
769 * @reply: reply message frame(lower 32bit addr) 769 * @reply: reply message frame(lower 32bit addr)
770 * 770 *
771 * Return 1 meaning mf should be freed from _base_interrupt 771 * Returns void.
772 * 0 means the mf is freed from this function.
773 */ 772 */
774static u8 773static void
775_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) 774_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
776{ 775{
777 Mpi2EventNotificationReply_t *mpi_reply; 776 Mpi2EventNotificationReply_t *mpi_reply;
@@ -780,9 +779,9 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
780 779
781 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 780 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
782 if (!mpi_reply) 781 if (!mpi_reply)
783 return 1; 782 return;
784 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) 783 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
785 return 1; 784 return;
786#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 785#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
787 _base_display_event_data(ioc, mpi_reply); 786 _base_display_event_data(ioc, mpi_reply);
788#endif 787#endif
@@ -812,7 +811,7 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
812 /* ctl callback handler */ 811 /* ctl callback handler */
813 mpt2sas_ctl_event_callback(ioc, msix_index, reply); 812 mpt2sas_ctl_event_callback(ioc, msix_index, reply);
814 813
815 return 1; 814 return;
816} 815}
817 816
818/** 817/**
@@ -1409,8 +1408,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1409 int i; 1408 int i;
1410 u8 try_msix = 0; 1409 u8 try_msix = 0;
1411 1410
1412 INIT_LIST_HEAD(&ioc->reply_queue_list);
1413
1414 if (msix_disable == -1 || msix_disable == 0) 1411 if (msix_disable == -1 || msix_disable == 0)
1415 try_msix = 1; 1412 try_msix = 1;
1416 1413
@@ -1489,6 +1486,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1489 if (pci_enable_device_mem(pdev)) { 1486 if (pci_enable_device_mem(pdev)) {
1490 printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: " 1487 printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
1491 "failed\n", ioc->name); 1488 "failed\n", ioc->name);
1489 ioc->bars = 0;
1492 return -ENODEV; 1490 return -ENODEV;
1493 } 1491 }
1494 1492
@@ -1497,6 +1495,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1497 MPT2SAS_DRIVER_NAME)) { 1495 MPT2SAS_DRIVER_NAME)) {
1498 printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: " 1496 printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
1499 "failed\n", ioc->name); 1497 "failed\n", ioc->name);
1498 ioc->bars = 0;
1500 r = -ENODEV; 1499 r = -ENODEV;
1501 goto out_fail; 1500 goto out_fail;
1502 } 1501 }
@@ -4229,18 +4228,25 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
4229 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 4228 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4230 __func__)); 4229 __func__));
4231 4230
4232 _base_mask_interrupts(ioc); 4231 if (ioc->chip_phys && ioc->chip) {
4233 ioc->shost_recovery = 1; 4232 _base_mask_interrupts(ioc);
4234 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); 4233 ioc->shost_recovery = 1;
4235 ioc->shost_recovery = 0; 4234 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4235 ioc->shost_recovery = 0;
4236 }
4237
4236 _base_free_irq(ioc); 4238 _base_free_irq(ioc);
4237 _base_disable_msix(ioc); 4239 _base_disable_msix(ioc);
4238 if (ioc->chip_phys) 4240
4241 if (ioc->chip_phys && ioc->chip)
4239 iounmap(ioc->chip); 4242 iounmap(ioc->chip);
4240 ioc->chip_phys = 0; 4243 ioc->chip_phys = 0;
4241 pci_release_selected_regions(ioc->pdev, ioc->bars); 4244
4242 pci_disable_pcie_error_reporting(pdev); 4245 if (pci_is_enabled(pdev)) {
4243 pci_disable_device(pdev); 4246 pci_release_selected_regions(ioc->pdev, ioc->bars);
4247 pci_disable_pcie_error_reporting(pdev);
4248 pci_disable_device(pdev);
4249 }
4244 return; 4250 return;
4245} 4251}
4246 4252
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 6fbd08417773..1f2ac3a28621 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
3 * for access to MPT (Message Passing Technology) firmware. 3 * for access to MPT (Message Passing Technology) firmware.
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.h 5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
6 * Copyright (C) 2007-2012 LSI Corporation 6 * Copyright (C) 2007-2013 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -69,8 +69,8 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "15.100.00.00" 72#define MPT2SAS_DRIVER_VERSION "16.100.00.00"
73#define MPT2SAS_MAJOR_VERSION 15 73#define MPT2SAS_MAJOR_VERSION 16
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
@@ -1061,7 +1061,7 @@ void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
1061int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc); 1061int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
1062 1062
1063/* scsih shared API */ 1063/* scsih shared API */
1064u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, 1064void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
1065 u32 reply); 1065 u32 reply);
1066int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, 1066int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1067 uint channel, uint id, uint lun, u8 type, u16 smid_task, 1067 uint channel, uint id, uint lun, u8 type, u16 smid_task,
@@ -1144,7 +1144,7 @@ void mpt2sas_ctl_exit(void);
1144u8 mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 1144u8 mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1145 u32 reply); 1145 u32 reply);
1146void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase); 1146void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
1147u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, 1147void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
1148 u32 reply); 1148 u32 reply);
1149void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, 1149void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
1150 Mpi2EventNotificationReply_t *mpi_reply); 1150 Mpi2EventNotificationReply_t *mpi_reply);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 863778071a9d..0c47425c73f2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
2 * This module provides common API for accessing firmware configuration pages 2 * This module provides common API for accessing firmware configuration pages
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
5 * Copyright (C) 2007-2012 LSI Corporation 5 * Copyright (C) 2007-2013 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index eec052c2670a..b7f887c9b0bf 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
3 * controllers 3 * controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c 5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
6 * Copyright (C) 2007-2012 LSI Corporation 6 * Copyright (C) 2007-2013 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -397,18 +397,22 @@ mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
397 * This function merely adds a new work task into ioc->firmware_event_thread. 397 * This function merely adds a new work task into ioc->firmware_event_thread.
398 * The tasks are worked from _firmware_event_work in user context. 398 * The tasks are worked from _firmware_event_work in user context.
399 * 399 *
400 * Return 1 meaning mf should be freed from _base_interrupt 400 * Returns void.
401 * 0 means the mf is freed from this function.
402 */ 401 */
403u8 402void
404mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, 403mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
405 u32 reply) 404 u32 reply)
406{ 405{
407 Mpi2EventNotificationReply_t *mpi_reply; 406 Mpi2EventNotificationReply_t *mpi_reply;
408 407
409 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 408 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
409 if (unlikely(!mpi_reply)) {
410 printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
411 ioc->name, __FILE__, __LINE__, __func__);
412 return;
413 }
410 mpt2sas_ctl_add_to_event_log(ioc, mpi_reply); 414 mpt2sas_ctl_add_to_event_log(ioc, mpi_reply);
411 return 1; 415 return;
412} 416}
413 417
414/** 418/**
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index b5eb0d1b8ea6..8b2ac1869dcc 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
3 * controllers 3 * controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h 5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
6 * Copyright (C) 2007-2012 LSI Corporation 6 * Copyright (C) 2007-2013 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 69cc7d0c112c..a9021cbd6628 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
2 * Logging Support for MPT (Message Passing Technology) based controllers 2 * Logging Support for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
5 * Copyright (C) 2007-2012 LSI Corporation 5 * Copyright (C) 2007-2013 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 51004768d0f5..7f0af4fcc001 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers 2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
5 * Copyright (C) 2007-2012 LSI Corporation 5 * Copyright (C) 2007-2013 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -628,11 +628,12 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
628 * devices while scanning is turned on due to an oops in 628 * devices while scanning is turned on due to an oops in
629 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start() 629 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
630 */ 630 */
631 if (!ioc->is_driver_loading) 631 if (!ioc->is_driver_loading) {
632 mpt2sas_transport_port_remove(ioc, 632 mpt2sas_transport_port_remove(ioc,
633 sas_device->sas_address, 633 sas_device->sas_address,
634 sas_device->sas_address_parent); 634 sas_device->sas_address_parent);
635 _scsih_sas_device_remove(ioc, sas_device); 635 _scsih_sas_device_remove(ioc, sas_device);
636 }
636 } 637 }
637} 638}
638 639
@@ -1402,6 +1403,7 @@ _scsih_slave_alloc(struct scsi_device *sdev)
1402 struct MPT2SAS_DEVICE *sas_device_priv_data; 1403 struct MPT2SAS_DEVICE *sas_device_priv_data;
1403 struct scsi_target *starget; 1404 struct scsi_target *starget;
1404 struct _raid_device *raid_device; 1405 struct _raid_device *raid_device;
1406 struct _sas_device *sas_device;
1405 unsigned long flags; 1407 unsigned long flags;
1406 1408
1407 sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); 1409 sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
@@ -1430,6 +1432,19 @@ _scsih_slave_alloc(struct scsi_device *sdev)
1430 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1432 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1431 } 1433 }
1432 1434
1435 if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1436 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1437 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
1438 sas_target_priv_data->sas_address);
1439 if (sas_device && (sas_device->starget == NULL)) {
1440 sdev_printk(KERN_INFO, sdev,
1441 "%s : sas_device->starget set to starget @ %d\n",
1442 __func__, __LINE__);
1443 sas_device->starget = starget;
1444 }
1445 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1446 }
1447
1433 return 0; 1448 return 0;
1434} 1449}
1435 1450
@@ -6753,7 +6768,7 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
6753 handle))) { 6768 handle))) {
6754 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6769 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6755 MPI2_IOCSTATUS_MASK; 6770 MPI2_IOCSTATUS_MASK;
6756 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 6771 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6757 break; 6772 break;
6758 handle = le16_to_cpu(sas_device_pg0.DevHandle); 6773 handle = le16_to_cpu(sas_device_pg0.DevHandle);
6759 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 6774 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
@@ -6862,7 +6877,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
6862 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { 6877 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
6863 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6878 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6864 MPI2_IOCSTATUS_MASK; 6879 MPI2_IOCSTATUS_MASK;
6865 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 6880 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6866 break; 6881 break;
6867 handle = le16_to_cpu(volume_pg1.DevHandle); 6882 handle = le16_to_cpu(volume_pg1.DevHandle);
6868 6883
@@ -6887,7 +6902,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
6887 phys_disk_num))) { 6902 phys_disk_num))) {
6888 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6903 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6889 MPI2_IOCSTATUS_MASK; 6904 MPI2_IOCSTATUS_MASK;
6890 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 6905 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6891 break; 6906 break;
6892 phys_disk_num = pd_pg0.PhysDiskNum; 6907 phys_disk_num = pd_pg0.PhysDiskNum;
6893 handle = le16_to_cpu(pd_pg0.DevHandle); 6908 handle = le16_to_cpu(pd_pg0.DevHandle);
@@ -6967,7 +6982,7 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
6967 6982
6968 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6983 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6969 MPI2_IOCSTATUS_MASK; 6984 MPI2_IOCSTATUS_MASK;
6970 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 6985 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6971 break; 6986 break;
6972 6987
6973 handle = le16_to_cpu(expander_pg0.DevHandle); 6988 handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -7109,8 +7124,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7109 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { 7124 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
7110 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 7125 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7111 MPI2_IOCSTATUS_MASK; 7126 MPI2_IOCSTATUS_MASK;
7112 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
7113 break;
7114 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7127 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7115 printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: " 7128 printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: "
7116 "ioc_status(0x%04x), loginfo(0x%08x)\n", 7129 "ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7153,8 +7166,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7153 phys_disk_num))) { 7166 phys_disk_num))) {
7154 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 7167 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7155 MPI2_IOCSTATUS_MASK; 7168 MPI2_IOCSTATUS_MASK;
7156 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
7157 break;
7158 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7169 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7159 printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:" 7170 printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:"
7160 "ioc_status(0x%04x), loginfo(0x%08x)\n", 7171 "ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7219,8 +7230,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7219 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { 7230 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
7220 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 7231 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7221 MPI2_IOCSTATUS_MASK; 7232 MPI2_IOCSTATUS_MASK;
7222 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
7223 break;
7224 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7233 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7225 printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: " 7234 printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
7226 "ioc_status(0x%04x), loginfo(0x%08x)\n", 7235 "ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7278,8 +7287,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
7278 handle))) { 7287 handle))) {
7279 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 7288 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7280 MPI2_IOCSTATUS_MASK; 7289 MPI2_IOCSTATUS_MASK;
7281 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
7282 break;
7283 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7290 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7284 printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:" 7291 printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:"
7285 " ioc_status(0x%04x), loginfo(0x%08x)\n", 7292 " ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7471,10 +7478,9 @@ _firmware_event_work(struct work_struct *work)
7471 * This function merely adds a new work task into ioc->firmware_event_thread. 7478 * This function merely adds a new work task into ioc->firmware_event_thread.
7472 * The tasks are worked from _firmware_event_work in user context. 7479 * The tasks are worked from _firmware_event_work in user context.
7473 * 7480 *
7474 * Return 1 meaning mf should be freed from _base_interrupt 7481 * Returns void.
7475 * 0 means the mf is freed from this function.
7476 */ 7482 */
7477u8 7483void
7478mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, 7484mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
7479 u32 reply) 7485 u32 reply)
7480{ 7486{
@@ -7485,14 +7491,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
7485 7491
7486 /* events turned off due to host reset or driver unloading */ 7492 /* events turned off due to host reset or driver unloading */
7487 if (ioc->remove_host || ioc->pci_error_recovery) 7493 if (ioc->remove_host || ioc->pci_error_recovery)
7488 return 1; 7494 return;
7489 7495
7490 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 7496 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
7491 7497
7492 if (unlikely(!mpi_reply)) { 7498 if (unlikely(!mpi_reply)) {
7493 printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n", 7499 printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
7494 ioc->name, __FILE__, __LINE__, __func__); 7500 ioc->name, __FILE__, __LINE__, __func__);
7495 return 1; 7501 return;
7496 } 7502 }
7497 7503
7498 event = le16_to_cpu(mpi_reply->Event); 7504 event = le16_to_cpu(mpi_reply->Event);
@@ -7507,11 +7513,11 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
7507 7513
7508 if (baen_data->Primitive != 7514 if (baen_data->Primitive !=
7509 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) 7515 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
7510 return 1; 7516 return;
7511 7517
7512 if (ioc->broadcast_aen_busy) { 7518 if (ioc->broadcast_aen_busy) {
7513 ioc->broadcast_aen_pending++; 7519 ioc->broadcast_aen_pending++;
7514 return 1; 7520 return;
7515 } else 7521 } else
7516 ioc->broadcast_aen_busy = 1; 7522 ioc->broadcast_aen_busy = 1;
7517 break; 7523 break;
@@ -7587,14 +7593,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
7587 break; 7593 break;
7588 7594
7589 default: /* ignore the rest */ 7595 default: /* ignore the rest */
7590 return 1; 7596 return;
7591 } 7597 }
7592 7598
7593 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); 7599 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
7594 if (!fw_event) { 7600 if (!fw_event) {
7595 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 7601 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
7596 ioc->name, __FILE__, __LINE__, __func__); 7602 ioc->name, __FILE__, __LINE__, __func__);
7597 return 1; 7603 return;
7598 } 7604 }
7599 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; 7605 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
7600 fw_event->event_data = kzalloc(sz, GFP_ATOMIC); 7606 fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
@@ -7602,7 +7608,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
7602 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 7608 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
7603 ioc->name, __FILE__, __LINE__, __func__); 7609 ioc->name, __FILE__, __LINE__, __func__);
7604 kfree(fw_event); 7610 kfree(fw_event);
7605 return 1; 7611 return;
7606 } 7612 }
7607 7613
7608 memcpy(fw_event->event_data, mpi_reply->EventData, 7614 memcpy(fw_event->event_data, mpi_reply->EventData,
@@ -7612,7 +7618,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
7612 fw_event->VP_ID = mpi_reply->VP_ID; 7618 fw_event->VP_ID = mpi_reply->VP_ID;
7613 fw_event->event = event; 7619 fw_event->event = event;
7614 _scsih_fw_event_add(ioc, fw_event); 7620 _scsih_fw_event_add(ioc, fw_event);
7615 return 1; 7621 return;
7616} 7622}
7617 7623
7618/* shost template */ 7624/* shost template */
@@ -7711,10 +7717,6 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
7711 if (!ioc->ir_firmware) 7717 if (!ioc->ir_firmware)
7712 return; 7718 return;
7713 7719
7714 /* are there any volumes ? */
7715 if (list_empty(&ioc->raid_device_list))
7716 return;
7717
7718 mutex_lock(&ioc->scsih_cmds.mutex); 7720 mutex_lock(&ioc->scsih_cmds.mutex);
7719 7721
7720 if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) { 7722 if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) {
@@ -7929,10 +7931,12 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
7929 sas_device->sas_address_parent)) { 7931 sas_device->sas_address_parent)) {
7930 _scsih_sas_device_remove(ioc, sas_device); 7932 _scsih_sas_device_remove(ioc, sas_device);
7931 } else if (!sas_device->starget) { 7933 } else if (!sas_device->starget) {
7932 if (!ioc->is_driver_loading) 7934 if (!ioc->is_driver_loading) {
7933 mpt2sas_transport_port_remove(ioc, sas_address, 7935 mpt2sas_transport_port_remove(ioc,
7936 sas_address,
7934 sas_address_parent); 7937 sas_address_parent);
7935 _scsih_sas_device_remove(ioc, sas_device); 7938 _scsih_sas_device_remove(ioc, sas_device);
7939 }
7936 } 7940 }
7937 } 7941 }
7938} 7942}
@@ -7985,14 +7989,14 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
7985 kfree(sas_device); 7989 kfree(sas_device);
7986 continue; 7990 continue;
7987 } else if (!sas_device->starget) { 7991 } else if (!sas_device->starget) {
7988 if (!ioc->is_driver_loading) 7992 if (!ioc->is_driver_loading) {
7989 mpt2sas_transport_port_remove(ioc, 7993 mpt2sas_transport_port_remove(ioc,
7990 sas_device->sas_address, 7994 sas_device->sas_address,
7991 sas_device->sas_address_parent); 7995 sas_device->sas_address_parent);
7992 list_del(&sas_device->list); 7996 list_del(&sas_device->list);
7993 kfree(sas_device); 7997 kfree(sas_device);
7994 continue; 7998 continue;
7995 7999 }
7996 } 8000 }
7997 spin_lock_irqsave(&ioc->sas_device_lock, flags); 8001 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7998 list_move_tail(&sas_device->list, &ioc->sas_device_list); 8002 list_move_tail(&sas_device->list, &ioc->sas_device_list);
@@ -8175,6 +8179,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8175 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); 8179 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
8176 INIT_LIST_HEAD(&ioc->delayed_tr_list); 8180 INIT_LIST_HEAD(&ioc->delayed_tr_list);
8177 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); 8181 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
8182 INIT_LIST_HEAD(&ioc->reply_queue_list);
8178 8183
8179 /* init shost parameters */ 8184 /* init shost parameters */
8180 shost->max_cmd_len = 32; 8185 shost->max_cmd_len = 32;
@@ -8280,6 +8285,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
8280 8285
8281 mpt2sas_base_stop_watchdog(ioc); 8286 mpt2sas_base_stop_watchdog(ioc);
8282 scsi_block_requests(shost); 8287 scsi_block_requests(shost);
8288 _scsih_ir_shutdown(ioc);
8283 device_state = pci_choose_state(pdev, state); 8289 device_state = pci_choose_state(pdev, state);
8284 printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering " 8290 printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering "
8285 "operating state [D%d]\n", ioc->name, pdev, 8291 "operating state [D%d]\n", ioc->name, pdev,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 193e7ae90c3b..9d26637308be 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers 2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
5 * Copyright (C) 2007-2012 LSI Corporation 5 * Copyright (C) 2007-2013 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -1006,9 +1006,12 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
1006 &mpt2sas_phy->remote_identify); 1006 &mpt2sas_phy->remote_identify);
1007 _transport_add_phy_to_an_existing_port(ioc, sas_node, 1007 _transport_add_phy_to_an_existing_port(ioc, sas_node,
1008 mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address); 1008 mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
1009 } else 1009 } else {
1010 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct 1010 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
1011 sas_identify)); 1011 sas_identify));
1012 _transport_del_phy_from_an_existing_port(ioc, sas_node,
1013 mpt2sas_phy);
1014 }
1012 1015
1013 if (mpt2sas_phy->phy) 1016 if (mpt2sas_phy->phy)
1014 mpt2sas_phy->phy->negotiated_linkrate = 1017 mpt2sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 5dc280c75325..fa785062e97b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -82,6 +82,10 @@ static int msix_disable = -1;
82module_param(msix_disable, int, 0); 82module_param(msix_disable, int, 0);
83MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); 83MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
84 84
85static int max_msix_vectors = 8;
86module_param(max_msix_vectors, int, 0);
87MODULE_PARM_DESC(max_msix_vectors,
88 " max msix vectors - (default=8)");
85 89
86static int mpt3sas_fwfault_debug; 90static int mpt3sas_fwfault_debug;
87MODULE_PARM_DESC(mpt3sas_fwfault_debug, 91MODULE_PARM_DESC(mpt3sas_fwfault_debug,
@@ -1709,8 +1713,6 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1709 int i; 1713 int i;
1710 u8 try_msix = 0; 1714 u8 try_msix = 0;
1711 1715
1712 INIT_LIST_HEAD(&ioc->reply_queue_list);
1713
1714 if (msix_disable == -1 || msix_disable == 0) 1716 if (msix_disable == -1 || msix_disable == 0)
1715 try_msix = 1; 1717 try_msix = 1;
1716 1718
@@ -1723,6 +1725,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1723 ioc->reply_queue_count = min_t(int, ioc->cpu_count, 1725 ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1724 ioc->msix_vector_count); 1726 ioc->msix_vector_count);
1725 1727
1728 printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
1729 ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
1730 ioc->cpu_count, max_msix_vectors);
1731
1732 if (max_msix_vectors > 0) {
1733 ioc->reply_queue_count = min_t(int, max_msix_vectors,
1734 ioc->reply_queue_count);
1735 ioc->msix_vector_count = ioc->reply_queue_count;
1736 }
1737
1726 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), 1738 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1727 GFP_KERNEL); 1739 GFP_KERNEL);
1728 if (!entries) { 1740 if (!entries) {
@@ -1790,6 +1802,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
1790 if (pci_enable_device_mem(pdev)) { 1802 if (pci_enable_device_mem(pdev)) {
1791 pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n", 1803 pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
1792 ioc->name); 1804 ioc->name);
1805 ioc->bars = 0;
1793 return -ENODEV; 1806 return -ENODEV;
1794 } 1807 }
1795 1808
@@ -1798,6 +1811,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
1798 MPT3SAS_DRIVER_NAME)) { 1811 MPT3SAS_DRIVER_NAME)) {
1799 pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n", 1812 pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
1800 ioc->name); 1813 ioc->name);
1814 ioc->bars = 0;
1801 r = -ENODEV; 1815 r = -ENODEV;
1802 goto out_fail; 1816 goto out_fail;
1803 } 1817 }
@@ -4393,18 +4407,25 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
4393 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4407 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4394 __func__)); 4408 __func__));
4395 4409
4396 _base_mask_interrupts(ioc); 4410 if (ioc->chip_phys && ioc->chip) {
4397 ioc->shost_recovery = 1; 4411 _base_mask_interrupts(ioc);
4398 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); 4412 ioc->shost_recovery = 1;
4399 ioc->shost_recovery = 0; 4413 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4414 ioc->shost_recovery = 0;
4415 }
4416
4400 _base_free_irq(ioc); 4417 _base_free_irq(ioc);
4401 _base_disable_msix(ioc); 4418 _base_disable_msix(ioc);
4402 if (ioc->chip_phys) 4419
4420 if (ioc->chip_phys && ioc->chip)
4403 iounmap(ioc->chip); 4421 iounmap(ioc->chip);
4404 ioc->chip_phys = 0; 4422 ioc->chip_phys = 0;
4405 pci_release_selected_regions(ioc->pdev, ioc->bars); 4423
4406 pci_disable_pcie_error_reporting(pdev); 4424 if (pci_is_enabled(pdev)) {
4407 pci_disable_device(pdev); 4425 pci_release_selected_regions(ioc->pdev, ioc->bars);
4426 pci_disable_pcie_error_reporting(pdev);
4427 pci_disable_device(pdev);
4428 }
4408 return; 4429 return;
4409} 4430}
4410 4431
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8cbe8fd21fc4..a961fe11b527 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7779,6 +7779,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
7779 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); 7779 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
7780 INIT_LIST_HEAD(&ioc->delayed_tr_list); 7780 INIT_LIST_HEAD(&ioc->delayed_tr_list);
7781 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); 7781 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
7782 INIT_LIST_HEAD(&ioc->reply_queue_list);
7782 7783
7783 /* init shost parameters */ 7784 /* init shost parameters */
7784 shost->max_cmd_len = 32; 7785 shost->max_cmd_len = 32;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index dcadd56860ff..e771a88c6a74 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1003,9 +1003,12 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
1003 &mpt3sas_phy->remote_identify); 1003 &mpt3sas_phy->remote_identify);
1004 _transport_add_phy_to_an_existing_port(ioc, sas_node, 1004 _transport_add_phy_to_an_existing_port(ioc, sas_node,
1005 mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address); 1005 mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
1006 } else 1006 } else {
1007 memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct 1007 memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
1008 sas_identify)); 1008 sas_identify));
1009 _transport_del_phy_from_an_existing_port(ioc, sas_node,
1010 mpt3sas_phy);
1011 }
1009 1012
1010 if (mpt3sas_phy->phy) 1013 if (mpt3sas_phy->phy)
1011 mpt3sas_phy->phy->negotiated_linkrate = 1014 mpt3sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3861aa1f4520..f7c189606b84 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -424,7 +424,8 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
424 PM8001_INIT_DBG(pm8001_ha, pm8001_printk( 424 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
425 "base addr %llx virt_addr=%llx len=%d\n", 425 "base addr %llx virt_addr=%llx len=%d\n",
426 (u64)pm8001_ha->io_mem[logicalBar].membase, 426 (u64)pm8001_ha->io_mem[logicalBar].membase,
427 (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr, 427 (u64)(unsigned long)
428 pm8001_ha->io_mem[logicalBar].memvirtaddr,
428 pm8001_ha->io_mem[logicalBar].memsize)); 429 pm8001_ha->io_mem[logicalBar].memsize));
429 } else { 430 } else {
430 pm8001_ha->io_mem[logicalBar].membase = 0; 431 pm8001_ha->io_mem[logicalBar].membase = 0;
@@ -734,7 +735,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
734 pdev = pm8001_ha->pdev; 735 pdev = pm8001_ha->pdev;
735 736
736#ifdef PM8001_USE_MSIX 737#ifdef PM8001_USE_MSIX
737 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) 738 if (pdev->msix_cap)
738 return pm8001_setup_msix(pm8001_ha); 739 return pm8001_setup_msix(pm8001_ha);
739 else { 740 else {
740 PM8001_INIT_DBG(pm8001_ha, 741 PM8001_INIT_DBG(pm8001_ha,
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index c37b244cf8ae..ff0fc7c7812f 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
3 qla_nx.o qla_mr.o qla_target.o 3 qla_nx.o qla_mr.o qla_nx2.o qla_target.o
4 4
5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o 6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d7a99ae7f39d..5f174b83f56f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -29,7 +29,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading)) 29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30 return 0; 30 return 0;
31 31
32 if (IS_QLA82XX(ha)) { 32 if (IS_P3P_TYPE(ha)) {
33 if (off < ha->md_template_size) { 33 if (off < ha->md_template_size) {
34 rval = memory_read_from_buffer(buf, count, 34 rval = memory_read_from_buffer(buf, count,
35 &off, ha->md_tmplt_hdr, ha->md_template_size); 35 &off, ha->md_tmplt_hdr, ha->md_template_size);
@@ -71,7 +71,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
71 ql_log(ql_log_info, vha, 0x705d, 71 ql_log(ql_log_info, vha, 0x705d,
72 "Firmware dump cleared on (%ld).\n", vha->host_no); 72 "Firmware dump cleared on (%ld).\n", vha->host_no);
73 73
74 if (IS_QLA82XX(vha->hw)) { 74 if (IS_P3P_TYPE(ha)) {
75 qla82xx_md_free(vha); 75 qla82xx_md_free(vha);
76 qla82xx_md_prep(vha); 76 qla82xx_md_prep(vha);
77 } 77 }
@@ -95,11 +95,15 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
95 qla82xx_idc_lock(ha); 95 qla82xx_idc_lock(ha);
96 qla82xx_set_reset_owner(vha); 96 qla82xx_set_reset_owner(vha);
97 qla82xx_idc_unlock(ha); 97 qla82xx_idc_unlock(ha);
98 } else if (IS_QLA8044(ha)) {
99 qla8044_idc_lock(ha);
100 qla82xx_set_reset_owner(vha);
101 qla8044_idc_unlock(ha);
98 } else 102 } else
99 qla2x00_system_error(vha); 103 qla2x00_system_error(vha);
100 break; 104 break;
101 case 4: 105 case 4:
102 if (IS_QLA82XX(ha)) { 106 if (IS_P3P_TYPE(ha)) {
103 if (ha->md_tmplt_hdr) 107 if (ha->md_tmplt_hdr)
104 ql_dbg(ql_dbg_user, vha, 0x705b, 108 ql_dbg(ql_dbg_user, vha, 0x705b,
105 "MiniDump supported with this firmware.\n"); 109 "MiniDump supported with this firmware.\n");
@@ -109,7 +113,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
109 } 113 }
110 break; 114 break;
111 case 5: 115 case 5:
112 if (IS_QLA82XX(ha)) 116 if (IS_P3P_TYPE(ha))
113 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 117 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
114 break; 118 break;
115 case 6: 119 case 6:
@@ -586,7 +590,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
586 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 590 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
587 int type; 591 int type;
588 uint32_t idc_control; 592 uint32_t idc_control;
589 593 uint8_t *tmp_data = NULL;
590 if (off != 0) 594 if (off != 0)
591 return -EINVAL; 595 return -EINVAL;
592 596
@@ -597,14 +601,23 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
597 "Issuing ISP reset.\n"); 601 "Issuing ISP reset.\n");
598 602
599 scsi_block_requests(vha->host); 603 scsi_block_requests(vha->host);
600 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
601 if (IS_QLA82XX(ha)) { 604 if (IS_QLA82XX(ha)) {
602 ha->flags.isp82xx_no_md_cap = 1; 605 ha->flags.isp82xx_no_md_cap = 1;
603 qla82xx_idc_lock(ha); 606 qla82xx_idc_lock(ha);
604 qla82xx_set_reset_owner(vha); 607 qla82xx_set_reset_owner(vha);
605 qla82xx_idc_unlock(ha); 608 qla82xx_idc_unlock(ha);
609 } else if (IS_QLA8044(ha)) {
610 qla8044_idc_lock(ha);
611 idc_control = qla8044_rd_reg(ha,
612 QLA8044_IDC_DRV_CTRL);
613 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
614 (idc_control | GRACEFUL_RESET_BIT1));
615 qla82xx_set_reset_owner(vha);
616 qla8044_idc_unlock(ha);
617 } else {
618 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
619 qla2xxx_wake_dpc(vha);
606 } 620 }
607 qla2xxx_wake_dpc(vha);
608 qla2x00_wait_for_chip_reset(vha); 621 qla2x00_wait_for_chip_reset(vha);
609 scsi_unblock_requests(vha->host); 622 scsi_unblock_requests(vha->host);
610 break; 623 break;
@@ -640,7 +653,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
640 break; 653 break;
641 } 654 }
642 case 0x2025e: 655 case 0x2025e:
643 if (!IS_QLA82XX(ha) || vha != base_vha) { 656 if (!IS_P3P_TYPE(ha) || vha != base_vha) {
644 ql_log(ql_log_info, vha, 0x7071, 657 ql_log(ql_log_info, vha, 0x7071,
645 "FCoE ctx reset no supported.\n"); 658 "FCoE ctx reset no supported.\n");
646 return -EPERM; 659 return -EPERM;
@@ -674,7 +687,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
674 __qla83xx_set_idc_control(vha, idc_control); 687 __qla83xx_set_idc_control(vha, idc_control);
675 qla83xx_idc_unlock(vha, 0); 688 qla83xx_idc_unlock(vha, 0);
676 break; 689 break;
677 690 case 0x20261:
691 ql_dbg(ql_dbg_user, vha, 0x70e0,
692 "Updating cache versions without reset ");
693
694 tmp_data = vmalloc(256);
695 if (!tmp_data) {
696 ql_log(ql_log_warn, vha, 0x70e1,
697 "Unable to allocate memory for VPD information update.\n");
698 return -ENOMEM;
699 }
700 ha->isp_ops->get_flash_version(vha, tmp_data);
701 vfree(tmp_data);
702 break;
678 } 703 }
679 return count; 704 return count;
680} 705}
@@ -1212,7 +1237,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1212 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1237 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1213 struct qla_hw_data *ha = vha->hw; 1238 struct qla_hw_data *ha = vha->hw;
1214 1239
1215 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 1240 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
1216 return snprintf(buf, PAGE_SIZE, "\n"); 1241 return snprintf(buf, PAGE_SIZE, "\n");
1217 1242
1218 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", 1243 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1265,10 +1290,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
1265 if (!IS_CNA_CAPABLE(vha->hw)) 1290 if (!IS_CNA_CAPABLE(vha->hw))
1266 return snprintf(buf, PAGE_SIZE, "\n"); 1291 return snprintf(buf, PAGE_SIZE, "\n");
1267 1292
1268 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n", 1293 return snprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1269 vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1270 vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1271 vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1272} 1294}
1273 1295
1274static ssize_t 1296static ssize_t
@@ -1287,12 +1309,6 @@ qla2x00_thermal_temp_show(struct device *dev,
1287 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1309 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1288 uint16_t temp = 0; 1310 uint16_t temp = 0;
1289 1311
1290 if (!vha->hw->thermal_support) {
1291 ql_log(ql_log_warn, vha, 0x70db,
1292 "Thermal not supported by this card.\n");
1293 goto done;
1294 }
1295
1296 if (qla2x00_reset_active(vha)) { 1312 if (qla2x00_reset_active(vha)) {
1297 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); 1313 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1298 goto done; 1314 goto done;
@@ -1725,11 +1741,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1725 pfc_host_stat->lip_count = stats->lip_cnt; 1741 pfc_host_stat->lip_count = stats->lip_cnt;
1726 pfc_host_stat->tx_frames = stats->tx_frames; 1742 pfc_host_stat->tx_frames = stats->tx_frames;
1727 pfc_host_stat->rx_frames = stats->rx_frames; 1743 pfc_host_stat->rx_frames = stats->rx_frames;
1728 pfc_host_stat->dumped_frames = stats->dumped_frames; 1744 pfc_host_stat->dumped_frames = stats->discarded_frames;
1729 pfc_host_stat->nos_count = stats->nos_rcvd; 1745 pfc_host_stat->nos_count = stats->nos_rcvd;
1746 pfc_host_stat->error_frames =
1747 stats->dropped_frames + stats->discarded_frames;
1748 pfc_host_stat->rx_words = vha->qla_stats.input_bytes;
1749 pfc_host_stat->tx_words = vha->qla_stats.output_bytes;
1730 } 1750 }
1751 pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests;
1752 pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests;
1753 pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests;
1731 pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; 1754 pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
1732 pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; 1755 pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
1756 pfc_host_stat->seconds_since_last_reset =
1757 get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
1758 do_div(pfc_host_stat->seconds_since_last_reset, HZ);
1733 1759
1734done_free: 1760done_free:
1735 dma_pool_free(ha->s_dma_pool, stats, stats_dma); 1761 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1738,6 +1764,16 @@ done:
1738} 1764}
1739 1765
1740static void 1766static void
1767qla2x00_reset_host_stats(struct Scsi_Host *shost)
1768{
1769 scsi_qla_host_t *vha = shost_priv(shost);
1770
1771 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
1772
1773 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
1774}
1775
1776static void
1741qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) 1777qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1742{ 1778{
1743 scsi_qla_host_t *vha = shost_priv(shost); 1779 scsi_qla_host_t *vha = shost_priv(shost);
@@ -2043,6 +2079,7 @@ struct fc_function_template qla2xxx_transport_functions = {
2043 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, 2079 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2044 .terminate_rport_io = qla2x00_terminate_rport_io, 2080 .terminate_rport_io = qla2x00_terminate_rport_io,
2045 .get_fc_host_stats = qla2x00_get_fc_host_stats, 2081 .get_fc_host_stats = qla2x00_get_fc_host_stats,
2082 .reset_fc_host_stats = qla2x00_reset_host_stats,
2046 2083
2047 .vport_create = qla24xx_vport_create, 2084 .vport_create = qla24xx_vport_create,
2048 .vport_disable = qla24xx_vport_disable, 2085 .vport_disable = qla24xx_vport_disable,
@@ -2089,6 +2126,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
2089 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, 2126 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2090 .terminate_rport_io = qla2x00_terminate_rport_io, 2127 .terminate_rport_io = qla2x00_terminate_rport_io,
2091 .get_fc_host_stats = qla2x00_get_fc_host_stats, 2128 .get_fc_host_stats = qla2x00_get_fc_host_stats,
2129 .reset_fc_host_stats = qla2x00_reset_host_stats,
2130
2092 .bsg_request = qla24xx_bsg_request, 2131 .bsg_request = qla24xx_bsg_request,
2093 .bsg_timeout = qla24xx_bsg_timeout, 2132 .bsg_timeout = qla24xx_bsg_timeout,
2094}; 2133};
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 417eaad50ae2..b989add77ec3 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -125,7 +125,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
125 uint32_t len; 125 uint32_t len;
126 uint32_t oper; 126 uint32_t oper;
127 127
128 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) { 128 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
129 ret = -EINVAL; 129 ret = -EINVAL;
130 goto exit_fcp_prio_cfg; 130 goto exit_fcp_prio_cfg;
131 } 131 }
@@ -559,7 +559,7 @@ qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
559 uint16_t new_config[4]; 559 uint16_t new_config[4];
560 struct qla_hw_data *ha = vha->hw; 560 struct qla_hw_data *ha = vha->hw;
561 561
562 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 562 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
563 goto done_reset_internal; 563 goto done_reset_internal;
564 564
565 memset(new_config, 0 , sizeof(new_config)); 565 memset(new_config, 0 , sizeof(new_config));
@@ -627,9 +627,10 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
627{ 627{
628 int ret = 0; 628 int ret = 0;
629 int rval = 0; 629 int rval = 0;
630 unsigned long rem_tmo = 0, current_tmo = 0;
630 struct qla_hw_data *ha = vha->hw; 631 struct qla_hw_data *ha = vha->hw;
631 632
632 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 633 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
633 goto done_set_internal; 634 goto done_set_internal;
634 635
635 if (mode == INTERNAL_LOOPBACK) 636 if (mode == INTERNAL_LOOPBACK)
@@ -652,8 +653,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
652 } 653 }
653 654
654 /* Wait for DCBX complete event */ 655 /* Wait for DCBX complete event */
655 if (!wait_for_completion_timeout(&ha->dcbx_comp, 656 current_tmo = DCBX_COMP_TIMEOUT * HZ;
656 (DCBX_COMP_TIMEOUT * HZ))) { 657 while (1) {
658 rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
659 current_tmo);
660 if (!ha->idc_extend_tmo || rem_tmo) {
661 ha->idc_extend_tmo = 0;
662 break;
663 }
664 current_tmo = ha->idc_extend_tmo * HZ;
665 ha->idc_extend_tmo = 0;
666 }
667
668 if (!rem_tmo) {
657 ql_dbg(ql_dbg_user, vha, 0x7022, 669 ql_dbg(ql_dbg_user, vha, 0x7022,
658 "DCBX completion not received.\n"); 670 "DCBX completion not received.\n");
659 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0); 671 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
@@ -678,6 +690,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
678 } 690 }
679 691
680 ha->notify_dcbx_comp = 0; 692 ha->notify_dcbx_comp = 0;
693 ha->idc_extend_tmo = 0;
681 694
682done_set_internal: 695done_set_internal:
683 return rval; 696 return rval;
@@ -773,7 +786,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
773 786
774 if (atomic_read(&vha->loop_state) == LOOP_READY && 787 if (atomic_read(&vha->loop_state) == LOOP_READY &&
775 (ha->current_topology == ISP_CFG_F || 788 (ha->current_topology == ISP_CFG_F ||
776 ((IS_QLA81XX(ha) || IS_QLA8031(ha)) && 789 ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
777 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 790 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
778 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 791 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
779 elreq.options == EXTERNAL_LOOPBACK) { 792 elreq.options == EXTERNAL_LOOPBACK) {
@@ -783,7 +796,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
783 command_sent = INT_DEF_LB_ECHO_CMD; 796 command_sent = INT_DEF_LB_ECHO_CMD;
784 rval = qla2x00_echo_test(vha, &elreq, response); 797 rval = qla2x00_echo_test(vha, &elreq, response);
785 } else { 798 } else {
786 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) { 799 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
787 memset(config, 0, sizeof(config)); 800 memset(config, 0, sizeof(config));
788 memset(new_config, 0, sizeof(new_config)); 801 memset(new_config, 0, sizeof(new_config));
789 802
@@ -806,7 +819,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
806 "elreq.options=%04x\n", elreq.options); 819 "elreq.options=%04x\n", elreq.options);
807 820
808 if (elreq.options == EXTERNAL_LOOPBACK) 821 if (elreq.options == EXTERNAL_LOOPBACK)
809 if (IS_QLA8031(ha)) 822 if (IS_QLA8031(ha) || IS_QLA8044(ha))
810 rval = qla81xx_set_loopback_mode(vha, 823 rval = qla81xx_set_loopback_mode(vha,
811 config, new_config, elreq.options); 824 config, new_config, elreq.options);
812 else 825 else
@@ -1266,6 +1279,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1266 int rval = 0; 1279 int rval = 0;
1267 struct qla_port_param *port_param = NULL; 1280 struct qla_port_param *port_param = NULL;
1268 fc_port_t *fcport = NULL; 1281 fc_port_t *fcport = NULL;
1282 int found = 0;
1269 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1283 uint16_t mb[MAILBOX_REGISTER_COUNT];
1270 uint8_t *rsp_ptr = NULL; 1284 uint8_t *rsp_ptr = NULL;
1271 1285
@@ -1288,10 +1302,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1288 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn, 1302 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1289 fcport->port_name, sizeof(fcport->port_name))) 1303 fcport->port_name, sizeof(fcport->port_name)))
1290 continue; 1304 continue;
1305
1306 found = 1;
1291 break; 1307 break;
1292 } 1308 }
1293 1309
1294 if (!fcport) { 1310 if (!found) {
1295 ql_log(ql_log_warn, vha, 0x7049, 1311 ql_log(ql_log_warn, vha, 0x7049,
1296 "Failed to find port.\n"); 1312 "Failed to find port.\n");
1297 return -EINVAL; 1313 return -EINVAL;
@@ -1318,12 +1334,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1318 1334
1319 if (rval) { 1335 if (rval) {
1320 ql_log(ql_log_warn, vha, 0x704c, 1336 ql_log(ql_log_warn, vha, 0x704c,
1321 "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- " 1337 "iIDMA cmd failed for %8phN -- "
1322 "%04x %x %04x %04x.\n", fcport->port_name[0], 1338 "%04x %x %04x %04x.\n", fcport->port_name,
1323 fcport->port_name[1], fcport->port_name[2], 1339 rval, fcport->fp_speed, mb[0], mb[1]);
1324 fcport->port_name[3], fcport->port_name[4],
1325 fcport->port_name[5], fcport->port_name[6],
1326 fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1327 rval = (DID_ERROR << 16); 1340 rval = (DID_ERROR << 16);
1328 } else { 1341 } else {
1329 if (!port_param->mode) { 1342 if (!port_param->mode) {
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index df132fec6d86..2ef497ebadc0 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,9 +11,12 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x014f | 0x4b,0xba,0xfa | 14 * | Module Init and Probe | 0x0159 | 0x4b,0xba,0xfa |
15 * | Mailbox commands | 0x117a | 0x111a-0x111b | 15 * | Mailbox commands | 0x1181 | 0x111a-0x111b |
16 * | | | 0x1155-0x1158 | 16 * | | | 0x1155-0x1158 |
17 * | | | 0x1018-0x1019 |
18 * | | | 0x1115-0x1116 |
19 * | | | 0x10ca |
17 * | Device Discovery | 0x2095 | 0x2020-0x2022, | 20 * | Device Discovery | 0x2095 | 0x2020-0x2022, |
18 * | | | 0x2011-0x2012, | 21 * | | | 0x2011-0x2012, |
19 * | | | 0x2016 | 22 * | | | 0x2016 |
@@ -24,11 +27,12 @@
24 * | | | 0x3036,0x3038 | 27 * | | | 0x3036,0x3038 |
25 * | | | 0x303a | 28 * | | | 0x303a |
26 * | DPC Thread | 0x4022 | 0x4002,0x4013 | 29 * | DPC Thread | 0x4022 | 0x4002,0x4013 |
27 * | Async Events | 0x5081 | 0x502b-0x502f | 30 * | Async Events | 0x5087 | 0x502b-0x502f |
28 * | | | 0x5047,0x5052 | 31 * | | | 0x5047,0x5052 |
29 * | | | 0x5040,0x5075 | 32 * | | | 0x5084,0x5075 |
30 * | Timer Routines | 0x6011 | | 33 * | | | 0x503d,0x5044 |
31 * | User Space Interactions | 0x70dd | 0x7018,0x702e, | 34 * | Timer Routines | 0x6012 | |
35 * | User Space Interactions | 0x70e1 | 0x7018,0x702e, |
32 * | | | 0x7020,0x7024, | 36 * | | | 0x7020,0x7024, |
33 * | | | 0x7039,0x7045, | 37 * | | | 0x7039,0x7045, |
34 * | | | 0x7073-0x7075, | 38 * | | | 0x7073-0x7075, |
@@ -36,17 +40,28 @@
36 * | | | 0x70a5,0x70a6, | 40 * | | | 0x70a5,0x70a6, |
37 * | | | 0x70a8,0x70ab, | 41 * | | | 0x70a8,0x70ab, |
38 * | | | 0x70ad-0x70ae, | 42 * | | | 0x70ad-0x70ae, |
39 * | | | 0x70d1-0x70da, | 43 * | | | 0x70d1-0x70db, |
40 * | | | 0x7047,0x703b | 44 * | | | 0x7047,0x703b |
41 * | Task Management | 0x803c | 0x8025-0x8026 | 45 * | | | 0x70de-0x70df, |
46 * | Task Management | 0x803d | 0x8025-0x8026 |
42 * | | | 0x800b,0x8039 | 47 * | | | 0x800b,0x8039 |
43 * | AER/EEH | 0x9011 | | 48 * | AER/EEH | 0x9011 | |
44 * | Virtual Port | 0xa007 | | 49 * | Virtual Port | 0xa007 | |
45 * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 | 50 * | ISP82XX Specific | 0xb14c | 0xb002,0xb024 |
51 * | | | 0xb09e,0xb0ae |
52 * | | | 0xb0e0-0xb0ef |
53 * | | | 0xb085,0xb0dc |
54 * | | | 0xb107,0xb108 |
55 * | | | 0xb111,0xb11e |
56 * | | | 0xb12c,0xb12d |
57 * | | | 0xb13a,0xb142 |
58 * | | | 0xb13c-0xb140 |
59 * | | | 0xb149 |
46 * | MultiQ | 0xc00c | | 60 * | MultiQ | 0xc00c | |
47 * | Misc | 0xd010 | | 61 * | Misc | 0xd010 | |
48 * | Target Mode | 0xe070 | | 62 * | Target Mode | 0xe070 | 0xe021 |
49 * | Target Mode Management | 0xf072 | | 63 * | Target Mode Management | 0xf072 | 0xf002-0xf003 |
64 * | | | 0xf046-0xf049 |
50 * | Target Mode Task Management | 0x1000b | | 65 * | Target Mode Task Management | 0x1000b | |
51 * ---------------------------------------------------------------------- 66 * ----------------------------------------------------------------------
52 */ 67 */
@@ -519,7 +534,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
519 uint32_t cnt, que_idx; 534 uint32_t cnt, que_idx;
520 uint8_t que_cnt; 535 uint8_t que_cnt;
521 struct qla2xxx_mq_chain *mq = ptr; 536 struct qla2xxx_mq_chain *mq = ptr;
522 struct device_reg_25xxmq __iomem *reg; 537 device_reg_t __iomem *reg;
523 538
524 if (!ha->mqenable || IS_QLA83XX(ha)) 539 if (!ha->mqenable || IS_QLA83XX(ha))
525 return ptr; 540 return ptr;
@@ -533,13 +548,16 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
533 ha->max_req_queues : ha->max_rsp_queues; 548 ha->max_req_queues : ha->max_rsp_queues;
534 mq->count = htonl(que_cnt); 549 mq->count = htonl(que_cnt);
535 for (cnt = 0; cnt < que_cnt; cnt++) { 550 for (cnt = 0; cnt < que_cnt; cnt++) {
536 reg = (struct device_reg_25xxmq __iomem *) 551 reg = ISP_QUE_REG(ha, cnt);
537 (ha->mqiobase + cnt * QLA_QUE_PAGE);
538 que_idx = cnt * 4; 552 que_idx = cnt * 4;
539 mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in)); 553 mq->qregs[que_idx] =
540 mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out)); 554 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
541 mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in)); 555 mq->qregs[que_idx+1] =
542 mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out)); 556 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
557 mq->qregs[que_idx+2] =
558 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
559 mq->qregs[que_idx+3] =
560 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
543 } 561 }
544 562
545 return ptr + sizeof(struct qla2xxx_mq_chain); 563 return ptr + sizeof(struct qla2xxx_mq_chain);
@@ -941,7 +959,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
941 uint32_t *last_chain = NULL; 959 uint32_t *last_chain = NULL;
942 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 960 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
943 961
944 if (IS_QLA82XX(ha)) 962 if (IS_P3P_TYPE(ha))
945 return; 963 return;
946 964
947 risc_address = ext_mem_cnt = 0; 965 risc_address = ext_mem_cnt = 0;
@@ -2530,7 +2548,7 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
2530 if (!ql_mask_match(level)) 2548 if (!ql_mask_match(level))
2531 return; 2549 return;
2532 2550
2533 if (IS_QLA82XX(ha)) 2551 if (IS_P3P_TYPE(ha))
2534 mbx_reg = &reg82->mailbox_in[0]; 2552 mbx_reg = &reg82->mailbox_in[0];
2535 else if (IS_FWI2_CAPABLE(ha)) 2553 else if (IS_FWI2_CAPABLE(ha))
2536 mbx_reg = &reg24->mailbox0; 2554 mbx_reg = &reg24->mailbox0;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 95ca32a71e75..93db74ef3461 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -35,6 +35,7 @@
35 35
36#include "qla_bsg.h" 36#include "qla_bsg.h"
37#include "qla_nx.h" 37#include "qla_nx.h"
38#include "qla_nx2.h"
38#define QLA2XXX_DRIVER_NAME "qla2xxx" 39#define QLA2XXX_DRIVER_NAME "qla2xxx"
39#define QLA2XXX_APIDEV "ql2xapidev" 40#define QLA2XXX_APIDEV "ql2xapidev"
40#define QLA2XXX_MANUFACTURER "QLogic Corporation" 41#define QLA2XXX_MANUFACTURER "QLogic Corporation"
@@ -642,6 +643,7 @@ struct device_reg_fx00 {
642 uint32_t initval6; /* C8 */ 643 uint32_t initval6; /* C8 */
643 uint32_t initval7; /* CC */ 644 uint32_t initval7; /* CC */
644 uint32_t fwheartbeat; /* D0 */ 645 uint32_t fwheartbeat; /* D0 */
646 uint32_t pseudoaen; /* D4 */
645}; 647};
646 648
647 649
@@ -805,6 +807,7 @@ struct mbx_cmd_32 {
805#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change 807#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
806 Notification */ 808 Notification */
807#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */ 809#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
810#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */
808 811
809/* 83XX FCoE specific */ 812/* 83XX FCoE specific */
810#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */ 813#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
@@ -997,6 +1000,7 @@ struct mbx_cmd_32 {
997#define MBX_1 BIT_1 1000#define MBX_1 BIT_1
998#define MBX_0 BIT_0 1001#define MBX_0 BIT_0
999 1002
1003#define RNID_TYPE_SET_VERSION 0x9
1000#define RNID_TYPE_ASIC_TEMP 0xC 1004#define RNID_TYPE_ASIC_TEMP 0xC
1001 1005
1002/* 1006/*
@@ -1233,8 +1237,9 @@ struct link_statistics {
1233 uint32_t unused1[0x1a]; 1237 uint32_t unused1[0x1a];
1234 uint32_t tx_frames; 1238 uint32_t tx_frames;
1235 uint32_t rx_frames; 1239 uint32_t rx_frames;
1236 uint32_t dumped_frames; 1240 uint32_t discarded_frames;
1237 uint32_t unused2[2]; 1241 uint32_t dropped_frames;
1242 uint32_t unused2[1];
1238 uint32_t nos_rcvd; 1243 uint32_t nos_rcvd;
1239}; 1244};
1240 1245
@@ -2656,6 +2661,11 @@ struct qla_statistics {
2656 uint32_t total_isp_aborts; 2661 uint32_t total_isp_aborts;
2657 uint64_t input_bytes; 2662 uint64_t input_bytes;
2658 uint64_t output_bytes; 2663 uint64_t output_bytes;
2664 uint64_t input_requests;
2665 uint64_t output_requests;
2666 uint32_t control_requests;
2667
2668 uint64_t jiffies_at_last_reset;
2659}; 2669};
2660 2670
2661struct bidi_statistics { 2671struct bidi_statistics {
@@ -2670,9 +2680,8 @@ struct bidi_statistics {
2670#define QLA_MAX_QUEUES 256 2680#define QLA_MAX_QUEUES 256
2671#define ISP_QUE_REG(ha, id) \ 2681#define ISP_QUE_REG(ha, id) \
2672 ((ha->mqenable || IS_QLA83XX(ha)) ? \ 2682 ((ha->mqenable || IS_QLA83XX(ha)) ? \
2673 ((device_reg_t __iomem *)(ha->mqiobase) +\ 2683 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
2674 (QLA_QUE_PAGE * id)) :\ 2684 ((void __iomem *)ha->iobase))
2675 ((device_reg_t __iomem *)(ha->iobase)))
2676#define QLA_REQ_QUE_ID(tag) \ 2685#define QLA_REQ_QUE_ID(tag) \
2677 ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0) 2686 ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
2678#define QLA_DEFAULT_QUE_QOS 5 2687#define QLA_DEFAULT_QUE_QOS 5
@@ -2935,7 +2944,8 @@ struct qla_hw_data {
2935#define DT_ISP2031 BIT_15 2944#define DT_ISP2031 BIT_15
2936#define DT_ISP8031 BIT_16 2945#define DT_ISP8031 BIT_16
2937#define DT_ISPFX00 BIT_17 2946#define DT_ISPFX00 BIT_17
2938#define DT_ISP_LAST (DT_ISPFX00 << 1) 2947#define DT_ISP8044 BIT_18
2948#define DT_ISP_LAST (DT_ISP8044 << 1)
2939 2949
2940#define DT_T10_PI BIT_25 2950#define DT_T10_PI BIT_25
2941#define DT_IIDMA BIT_26 2951#define DT_IIDMA BIT_26
@@ -2961,6 +2971,7 @@ struct qla_hw_data {
2961#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001) 2971#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
2962#define IS_QLA81XX(ha) (IS_QLA8001(ha)) 2972#define IS_QLA81XX(ha) (IS_QLA8001(ha))
2963#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021) 2973#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
2974#define IS_QLA8044(ha) (DT_MASK(ha) & DT_ISP8044)
2964#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031) 2975#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
2965#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) 2976#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
2966#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00) 2977#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
@@ -2975,10 +2986,12 @@ struct qla_hw_data {
2975#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ 2986#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2976 IS_QLA84XX(ha)) 2987 IS_QLA84XX(ha))
2977#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ 2988#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
2978 IS_QLA8031(ha)) 2989 IS_QLA8031(ha) || IS_QLA8044(ha))
2990#define IS_P3P_TYPE(ha) (IS_QLA82XX(ha) || IS_QLA8044(ha))
2979#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ 2991#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2980 IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ 2992 IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
2981 IS_QLA82XX(ha) || IS_QLA83XX(ha)) 2993 IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
2994 IS_QLA8044(ha))
2982#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 2995#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
2983#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ 2996#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
2984 IS_QLA83XX(ha)) && (ha)->flags.msix_enabled) 2997 IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
@@ -3187,10 +3200,12 @@ struct qla_hw_data {
3187 uint32_t nvram_data_off; 3200 uint32_t nvram_data_off;
3188 3201
3189 uint32_t fdt_wrt_disable; 3202 uint32_t fdt_wrt_disable;
3203 uint32_t fdt_wrt_enable;
3190 uint32_t fdt_erase_cmd; 3204 uint32_t fdt_erase_cmd;
3191 uint32_t fdt_block_size; 3205 uint32_t fdt_block_size;
3192 uint32_t fdt_unprotect_sec_cmd; 3206 uint32_t fdt_unprotect_sec_cmd;
3193 uint32_t fdt_protect_sec_cmd; 3207 uint32_t fdt_protect_sec_cmd;
3208 uint32_t fdt_wrt_sts_reg_cmd;
3194 3209
3195 uint32_t flt_region_flt; 3210 uint32_t flt_region_flt;
3196 uint32_t flt_region_fdt; 3211 uint32_t flt_region_fdt;
@@ -3277,6 +3292,7 @@ struct qla_hw_data {
3277 3292
3278 /* QLA83XX IDC specific fields */ 3293 /* QLA83XX IDC specific fields */
3279 uint32_t idc_audit_ts; 3294 uint32_t idc_audit_ts;
3295 uint32_t idc_extend_tmo;
3280 3296
3281 /* DPC low-priority workqueue */ 3297 /* DPC low-priority workqueue */
3282 struct workqueue_struct *dpc_lp_wq; 3298 struct workqueue_struct *dpc_lp_wq;
@@ -3296,9 +3312,6 @@ struct qla_hw_data {
3296 struct mr_data_fx00 mr; 3312 struct mr_data_fx00 mr;
3297 3313
3298 struct qlt_hw_data tgt; 3314 struct qlt_hw_data tgt;
3299 uint16_t thermal_support;
3300#define THERMAL_SUPPORT_I2C BIT_0
3301#define THERMAL_SUPPORT_ISP BIT_1
3302}; 3315};
3303 3316
3304/* 3317/*
@@ -3364,6 +3377,7 @@ typedef struct scsi_qla_host {
3364#define PORT_UPDATE_NEEDED 24 3377#define PORT_UPDATE_NEEDED 24
3365#define FX00_RESET_RECOVERY 25 3378#define FX00_RESET_RECOVERY 25
3366#define FX00_TARGET_SCAN 26 3379#define FX00_TARGET_SCAN 26
3380#define FX00_CRITEMP_RECOVERY 27
3367 3381
3368 uint32_t device_flags; 3382 uint32_t device_flags;
3369#define SWITCH_FOUND BIT_0 3383#define SWITCH_FOUND BIT_0
@@ -3402,7 +3416,7 @@ typedef struct scsi_qla_host {
3402 uint16_t fcoe_fcf_idx; 3416 uint16_t fcoe_fcf_idx;
3403 uint8_t fcoe_vn_port_mac[6]; 3417 uint8_t fcoe_vn_port_mac[6];
3404 3418
3405 uint32_t vp_abort_cnt; 3419 uint32_t vp_abort_cnt;
3406 3420
3407 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 3421 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
3408 uint16_t vp_idx; /* vport ID */ 3422 uint16_t vp_idx; /* vport ID */
@@ -3435,6 +3449,7 @@ typedef struct scsi_qla_host {
3435 struct bidi_statistics bidi_stats; 3449 struct bidi_statistics bidi_stats;
3436 3450
3437 atomic_t vref_count; 3451 atomic_t vref_count;
3452 struct qla8044_reset_template reset_tmplt;
3438} scsi_qla_host_t; 3453} scsi_qla_host_t;
3439 3454
3440#define SET_VP_IDX 1 3455#define SET_VP_IDX 1
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 1ac2b0e3a0e1..610d3aa905a0 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1387,6 +1387,8 @@ struct qla_flt_header {
1387#define FLT_REG_GOLD_FW 0x2f 1387#define FLT_REG_GOLD_FW 0x2f
1388#define FLT_REG_FCP_PRIO_0 0x87 1388#define FLT_REG_FCP_PRIO_0 0x87
1389#define FLT_REG_FCP_PRIO_1 0x88 1389#define FLT_REG_FCP_PRIO_1 0x88
1390#define FLT_REG_CNA_FW 0x97
1391#define FLT_REG_BOOT_CODE_8044 0xA2
1390#define FLT_REG_FCOE_FW 0xA4 1392#define FLT_REG_FCOE_FW 0xA4
1391#define FLT_REG_FCOE_NVRAM_0 0xAA 1393#define FLT_REG_FCOE_NVRAM_0 0xAA
1392#define FLT_REG_FCOE_NVRAM_1 0xAC 1394#define FLT_REG_FCOE_NVRAM_1 0xAC
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2d98232a08eb..4446bf5fe292 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -357,6 +357,12 @@ extern int
357qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); 357qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
358 358
359extern int 359extern int
360qla82xx_set_driver_version(scsi_qla_host_t *, char *);
361
362extern int
363qla25xx_set_driver_version(scsi_qla_host_t *, char *);
364
365extern int
360qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, 366qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
361 uint16_t, uint16_t, uint16_t, uint16_t); 367 uint16_t, uint16_t, uint16_t, uint16_t);
362 368
@@ -435,19 +441,19 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
435 */ 441 */
436extern void qla2x00_release_nvram_protection(scsi_qla_host_t *); 442extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
437extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *, 443extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
438 uint32_t, uint32_t); 444 uint32_t, uint32_t);
439extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 445extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
440 uint32_t); 446 uint32_t);
441extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 447extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
442 uint32_t); 448 uint32_t);
443extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 449extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
444 uint32_t); 450 uint32_t);
445extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 451extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
446 uint32_t); 452 uint32_t);
447extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 453extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
448 uint32_t); 454 uint32_t);
449extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 455extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
450 uint32_t); 456 uint32_t);
451extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t); 457extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
452 458
453extern int qla2x00_beacon_on(struct scsi_qla_host *); 459extern int qla2x00_beacon_on(struct scsi_qla_host *);
@@ -463,21 +469,25 @@ extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t);
463extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *); 469extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *);
464extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *); 470extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
465extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t, 471extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
466 uint32_t, uint16_t *); 472 uint32_t, uint16_t *);
467 473
468extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, 474extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
469 uint32_t, uint32_t); 475 uint32_t, uint32_t);
470extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *, 476extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
471 uint32_t, uint32_t); 477 uint32_t, uint32_t);
472extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, 478extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
473 uint32_t, uint32_t); 479 uint32_t, uint32_t);
474extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, 480extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
475 uint32_t, uint32_t); 481 uint32_t, uint32_t);
476extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, 482extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
477 uint32_t, uint32_t); 483 uint32_t, uint32_t);
484extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *,
485 uint8_t *, uint32_t, uint32_t);
486extern void qla8044_watchdog(struct scsi_qla_host *vha);
478 487
479extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *); 488extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
480extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *); 489extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
490extern int qla82xx_get_flash_version(scsi_qla_host_t *, void *);
481 491
482extern int qla2xxx_get_flash_info(scsi_qla_host_t *); 492extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
483extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); 493extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
@@ -498,7 +508,7 @@ extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
498extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); 508extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
499extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); 509extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
500extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, 510extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
501 uint8_t *, uint32_t); 511 uint8_t *, uint32_t);
502extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); 512extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
503 513
504/* 514/*
@@ -584,6 +594,7 @@ extern int qlafx00_start_scsi(srb_t *);
584extern int qlafx00_abort_isp(scsi_qla_host_t *); 594extern int qlafx00_abort_isp(scsi_qla_host_t *);
585extern int qlafx00_iospace_config(struct qla_hw_data *); 595extern int qlafx00_iospace_config(struct qla_hw_data *);
586extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t); 596extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
597extern int qlafx00_driver_shutdown(scsi_qla_host_t *, int);
587extern int qlafx00_fw_ready(scsi_qla_host_t *); 598extern int qlafx00_fw_ready(scsi_qla_host_t *);
588extern int qlafx00_configure_devices(scsi_qla_host_t *); 599extern int qlafx00_configure_devices(scsi_qla_host_t *);
589extern int qlafx00_reset_initialize(scsi_qla_host_t *); 600extern int qlafx00_reset_initialize(scsi_qla_host_t *);
@@ -601,6 +612,7 @@ extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
601extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *); 612extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
602extern void qlafx00_timer_routine(scsi_qla_host_t *); 613extern void qlafx00_timer_routine(scsi_qla_host_t *);
603extern int qlafx00_rescan_isp(scsi_qla_host_t *); 614extern int qlafx00_rescan_isp(scsi_qla_host_t *);
615extern int qlafx00_loop_reset(scsi_qla_host_t *vha);
604 616
605/* qla82xx related functions */ 617/* qla82xx related functions */
606 618
@@ -619,9 +631,9 @@ extern int qla82xx_start_firmware(scsi_qla_host_t *);
619/* Firmware and flash related functions */ 631/* Firmware and flash related functions */
620extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *); 632extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
621extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, 633extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
622 uint32_t, uint32_t); 634 uint32_t, uint32_t);
623extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, 635extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
624 uint32_t, uint32_t); 636 uint32_t, uint32_t);
625 637
626/* Mailbox related functions */ 638/* Mailbox related functions */
627extern int qla82xx_abort_isp(scsi_qla_host_t *); 639extern int qla82xx_abort_isp(scsi_qla_host_t *);
@@ -662,7 +674,7 @@ extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
662extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *); 674extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
663 675
664extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, 676extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
665 size_t, char *); 677 size_t, char *);
666extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); 678extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
667extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); 679extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
668extern void qla82xx_start_iocbs(scsi_qla_host_t *); 680extern void qla82xx_start_iocbs(scsi_qla_host_t *);
@@ -674,6 +686,8 @@ extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
674extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int); 686extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
675extern char *qdev_state(uint32_t); 687extern char *qdev_state(uint32_t);
676extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *); 688extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
689extern int qla82xx_read_temperature(scsi_qla_host_t *);
690extern int qla8044_read_temperature(scsi_qla_host_t *);
677 691
678/* BSG related functions */ 692/* BSG related functions */
679extern int qla24xx_bsg_request(struct fc_bsg_job *); 693extern int qla24xx_bsg_request(struct fc_bsg_job *);
@@ -695,5 +709,31 @@ extern void qla82xx_md_free(scsi_qla_host_t *);
695extern int qla82xx_md_collect(scsi_qla_host_t *); 709extern int qla82xx_md_collect(scsi_qla_host_t *);
696extern void qla82xx_md_prep(scsi_qla_host_t *); 710extern void qla82xx_md_prep(scsi_qla_host_t *);
697extern void qla82xx_set_reset_owner(scsi_qla_host_t *); 711extern void qla82xx_set_reset_owner(scsi_qla_host_t *);
712extern int qla82xx_validate_template_chksum(scsi_qla_host_t *vha);
713
714/* Function declarations for ISP8044 */
715extern int qla8044_idc_lock(struct qla_hw_data *ha);
716extern void qla8044_idc_unlock(struct qla_hw_data *ha);
717extern uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr);
718extern void qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val);
719extern void qla8044_read_reset_template(struct scsi_qla_host *ha);
720extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha);
721extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg);
722extern void qla8044_wr_direct(struct scsi_qla_host *vha,
723 const uint32_t crb_reg, const uint32_t value);
724extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha);
725extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha);
726extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
727extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
728extern void qla8044_clear_drv_active(struct scsi_qla_host *vha);
729void qla8044_get_minidump(struct scsi_qla_host *vha);
730int qla8044_collect_md_data(struct scsi_qla_host *vha);
731extern int qla8044_md_get_template(scsi_qla_host_t *);
732extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *,
733 uint32_t, uint32_t);
734extern irqreturn_t qla8044_intr_handler(int, void *);
735extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
736extern int qla8044_abort_isp(scsi_qla_host_t *);
737extern int qla8044_check_fw_alive(struct scsi_qla_host *);
698 738
699#endif /* _QLA_GBL_H */ 739#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 0926451980ed..cd47f1b32d9a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -49,6 +49,8 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
49 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 49 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
50 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount; 50 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
51 51
52 vha->qla_stats.control_requests++;
53
52 return (ms_pkt); 54 return (ms_pkt);
53} 55}
54 56
@@ -87,6 +89,8 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
87 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 89 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
88 ct_pkt->vp_index = vha->vp_idx; 90 ct_pkt->vp_index = vha->vp_idx;
89 91
92 vha->qla_stats.control_requests++;
93
90 return (ct_pkt); 94 return (ct_pkt);
91} 95}
92 96
@@ -226,17 +230,9 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
226 fcport->d_id.b.domain = 0xf0; 230 fcport->d_id.b.domain = 0xf0;
227 231
228 ql_dbg(ql_dbg_disc, vha, 0x2063, 232 ql_dbg(ql_dbg_disc, vha, 0x2063,
229 "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " 233 "GA_NXT entry - nn %8phN pn %8phN "
230 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
231 "port_id=%02x%02x%02x.\n", 234 "port_id=%02x%02x%02x.\n",
232 fcport->node_name[0], fcport->node_name[1], 235 fcport->node_name, fcport->port_name,
233 fcport->node_name[2], fcport->node_name[3],
234 fcport->node_name[4], fcport->node_name[5],
235 fcport->node_name[6], fcport->node_name[7],
236 fcport->port_name[0], fcport->port_name[1],
237 fcport->port_name[2], fcport->port_name[3],
238 fcport->port_name[4], fcport->port_name[5],
239 fcport->port_name[6], fcport->port_name[7],
240 fcport->d_id.b.domain, fcport->d_id.b.area, 236 fcport->d_id.b.domain, fcport->d_id.b.area,
241 fcport->d_id.b.al_pa); 237 fcport->d_id.b.al_pa);
242 } 238 }
@@ -447,17 +443,9 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
447 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); 443 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
448 444
449 ql_dbg(ql_dbg_disc, vha, 0x2058, 445 ql_dbg(ql_dbg_disc, vha, 0x2058,
450 "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x " 446 "GID_PT entry - nn %8phN pn %8phN "
451 "pn %02x%02x%02x%02x%02x%02x%02X%02x "
452 "portid=%02x%02x%02x.\n", 447 "portid=%02x%02x%02x.\n",
453 list[i].node_name[0], list[i].node_name[1], 448 list[i].node_name, list[i].port_name,
454 list[i].node_name[2], list[i].node_name[3],
455 list[i].node_name[4], list[i].node_name[5],
456 list[i].node_name[6], list[i].node_name[7],
457 list[i].port_name[0], list[i].port_name[1],
458 list[i].port_name[2], list[i].port_name[3],
459 list[i].port_name[4], list[i].port_name[5],
460 list[i].port_name[6], list[i].port_name[7],
461 list[i].d_id.b.domain, list[i].d_id.b.area, 449 list[i].d_id.b.domain, list[i].d_id.b.area,
462 list[i].d_id.b.al_pa); 450 list[i].d_id.b.al_pa);
463 } 451 }
@@ -739,6 +727,8 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
739 wc = (data_size - 16) / 4; /* Size in 32bit words. */ 727 wc = (data_size - 16) / 4; /* Size in 32bit words. */
740 sns_cmd->p.cmd.size = cpu_to_le16(wc); 728 sns_cmd->p.cmd.size = cpu_to_le16(wc);
741 729
730 vha->qla_stats.control_requests++;
731
742 return (sns_cmd); 732 return (sns_cmd);
743} 733}
744 734
@@ -796,17 +786,9 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
796 fcport->d_id.b.domain = 0xf0; 786 fcport->d_id.b.domain = 0xf0;
797 787
798 ql_dbg(ql_dbg_disc, vha, 0x2061, 788 ql_dbg(ql_dbg_disc, vha, 0x2061,
799 "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " 789 "GA_NXT entry - nn %8phN pn %8phN "
800 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
801 "port_id=%02x%02x%02x.\n", 790 "port_id=%02x%02x%02x.\n",
802 fcport->node_name[0], fcport->node_name[1], 791 fcport->node_name, fcport->port_name,
803 fcport->node_name[2], fcport->node_name[3],
804 fcport->node_name[4], fcport->node_name[5],
805 fcport->node_name[6], fcport->node_name[7],
806 fcport->port_name[0], fcport->port_name[1],
807 fcport->port_name[2], fcport->port_name[3],
808 fcport->port_name[4], fcport->port_name[5],
809 fcport->port_name[6], fcport->port_name[7],
810 fcport->d_id.b.domain, fcport->d_id.b.area, 792 fcport->d_id.b.domain, fcport->d_id.b.area,
811 fcport->d_id.b.al_pa); 793 fcport->d_id.b.al_pa);
812 } 794 }
@@ -991,17 +973,9 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
991 WWN_SIZE); 973 WWN_SIZE);
992 974
993 ql_dbg(ql_dbg_disc, vha, 0x206e, 975 ql_dbg(ql_dbg_disc, vha, 0x206e,
994 "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " 976 "GID_PT entry - nn %8phN pn %8phN "
995 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
996 "port_id=%02x%02x%02x.\n", 977 "port_id=%02x%02x%02x.\n",
997 list[i].node_name[0], list[i].node_name[1], 978 list[i].node_name, list[i].port_name,
998 list[i].node_name[2], list[i].node_name[3],
999 list[i].node_name[4], list[i].node_name[5],
1000 list[i].node_name[6], list[i].node_name[7],
1001 list[i].port_name[0], list[i].port_name[1],
1002 list[i].port_name[2], list[i].port_name[3],
1003 list[i].port_name[4], list[i].port_name[5],
1004 list[i].port_name[6], list[i].port_name[7],
1005 list[i].d_id.b.domain, list[i].d_id.b.area, 979 list[i].d_id.b.domain, list[i].d_id.b.area,
1006 list[i].d_id.b.al_pa); 980 list[i].d_id.b.al_pa);
1007 } 981 }
@@ -1321,11 +1295,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1321 size += 4 + WWN_SIZE; 1295 size += 4 + WWN_SIZE;
1322 1296
1323 ql_dbg(ql_dbg_disc, vha, 0x2025, 1297 ql_dbg(ql_dbg_disc, vha, 0x2025,
1324 "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n", 1298 "NodeName = %8phN.\n", eiter->a.node_name);
1325 eiter->a.node_name[0], eiter->a.node_name[1],
1326 eiter->a.node_name[2], eiter->a.node_name[3],
1327 eiter->a.node_name[4], eiter->a.node_name[5],
1328 eiter->a.node_name[6], eiter->a.node_name[7]);
1329 1299
1330 /* Manufacturer. */ 1300 /* Manufacturer. */
1331 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1301 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1428,16 +1398,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1428 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1398 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1429 1399
1430 ql_dbg(ql_dbg_disc, vha, 0x202e, 1400 ql_dbg(ql_dbg_disc, vha, 0x202e,
1431 "RHBA identifier = " 1401 "RHBA identifier = %8phN size=%d.\n",
1432 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", 1402 ct_req->req.rhba.hba_identifier, size);
1433 ct_req->req.rhba.hba_identifier[0],
1434 ct_req->req.rhba.hba_identifier[1],
1435 ct_req->req.rhba.hba_identifier[2],
1436 ct_req->req.rhba.hba_identifier[3],
1437 ct_req->req.rhba.hba_identifier[4],
1438 ct_req->req.rhba.hba_identifier[5],
1439 ct_req->req.rhba.hba_identifier[6],
1440 ct_req->req.rhba.hba_identifier[7], size);
1441 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076, 1403 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1442 entries, size); 1404 entries, size);
1443 1405
@@ -1494,11 +1456,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
1494 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); 1456 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
1495 1457
1496 ql_dbg(ql_dbg_disc, vha, 0x2036, 1458 ql_dbg(ql_dbg_disc, vha, 0x2036,
1497 "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n", 1459 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
1498 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
1499 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
1500 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
1501 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
1502 1460
1503 /* Execute MS IOCB */ 1461 /* Execute MS IOCB */
1504 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1462 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -1678,12 +1636,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1678 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1636 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1679 1637
1680 ql_dbg(ql_dbg_disc, vha, 0x203e, 1638 ql_dbg(ql_dbg_disc, vha, 0x203e,
1681 "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n", 1639 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
1682 ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
1683 ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
1684 ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
1685 ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
1686 size);
1687 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079, 1640 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1688 entries, size); 1641 entries, size);
1689 1642
@@ -1940,16 +1893,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1940 1893
1941 ql_dbg(ql_dbg_disc, vha, 0x205b, 1894 ql_dbg(ql_dbg_disc, vha, 0x205b,
1942 "GPSC ext entry - fpn " 1895 "GPSC ext entry - fpn "
1943 "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " 1896 "%8phN speeds=%04x speed=%04x.\n",
1944 "speed=%04x.\n", 1897 list[i].fabric_port_name,
1945 list[i].fabric_port_name[0],
1946 list[i].fabric_port_name[1],
1947 list[i].fabric_port_name[2],
1948 list[i].fabric_port_name[3],
1949 list[i].fabric_port_name[4],
1950 list[i].fabric_port_name[5],
1951 list[i].fabric_port_name[6],
1952 list[i].fabric_port_name[7],
1953 be16_to_cpu(ct_rsp->rsp.gpsc.speeds), 1898 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
1954 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 1899 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
1955 } 1900 }
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f2216ed2ad8c..03f715e7591e 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -524,7 +524,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
524 vha->flags.reset_active = 0; 524 vha->flags.reset_active = 0;
525 ha->flags.pci_channel_io_perm_failure = 0; 525 ha->flags.pci_channel_io_perm_failure = 0;
526 ha->flags.eeh_busy = 0; 526 ha->flags.eeh_busy = 0;
527 ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP; 527 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
528 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 528 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
529 atomic_set(&vha->loop_state, LOOP_DOWN); 529 atomic_set(&vha->loop_state, LOOP_DOWN);
530 vha->device_flags = DFLG_NO_CABLE; 530 vha->device_flags = DFLG_NO_CABLE;
@@ -552,7 +552,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
552 if (rval) { 552 if (rval) {
553 ql_log(ql_log_fatal, vha, 0x004f, 553 ql_log(ql_log_fatal, vha, 0x004f,
554 "Unable to validate FLASH data.\n"); 554 "Unable to validate FLASH data.\n");
555 return (rval); 555 return rval;
556 }
557
558 if (IS_QLA8044(ha)) {
559 qla8044_read_reset_template(vha);
560
561 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
562 * If DONRESET_BIT0 is set, drivers should not set dev_state
563 * to NEED_RESET. But if NEED_RESET is set, drivers should
564 * should honor the reset. */
565 if (ql2xdontresethba == 1)
566 qla8044_set_idc_dontreset(vha);
556 } 567 }
557 568
558 ha->isp_ops->get_flash_version(vha, req->ring); 569 ha->isp_ops->get_flash_version(vha, req->ring);
@@ -564,12 +575,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
564 if (ha->flags.disable_serdes) { 575 if (ha->flags.disable_serdes) {
565 /* Mask HBA via NVRAM settings? */ 576 /* Mask HBA via NVRAM settings? */
566 ql_log(ql_log_info, vha, 0x0077, 577 ql_log(ql_log_info, vha, 0x0077,
567 "Masking HBA WWPN " 578 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
568 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
569 vha->port_name[0], vha->port_name[1],
570 vha->port_name[2], vha->port_name[3],
571 vha->port_name[4], vha->port_name[5],
572 vha->port_name[6], vha->port_name[7]);
573 return QLA_FUNCTION_FAILED; 579 return QLA_FUNCTION_FAILED;
574 } 580 }
575 581
@@ -620,6 +626,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
620 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) 626 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
621 qla24xx_read_fcp_prio_cfg(vha); 627 qla24xx_read_fcp_prio_cfg(vha);
622 628
629 if (IS_P3P_TYPE(ha))
630 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
631 else
632 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
633
623 return (rval); 634 return (rval);
624} 635}
625 636
@@ -1332,7 +1343,7 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
1332 struct qla_hw_data *ha = vha->hw; 1343 struct qla_hw_data *ha = vha->hw;
1333 struct req_que *req = ha->req_q_map[0]; 1344 struct req_que *req = ha->req_q_map[0];
1334 1345
1335 if (IS_QLA82XX(ha)) 1346 if (IS_P3P_TYPE(ha))
1336 return QLA_SUCCESS; 1347 return QLA_SUCCESS;
1337 1348
1338 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 1349 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
@@ -1615,7 +1626,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
1615 unsigned long flags; 1626 unsigned long flags;
1616 uint16_t fw_major_version; 1627 uint16_t fw_major_version;
1617 1628
1618 if (IS_QLA82XX(ha)) { 1629 if (IS_P3P_TYPE(ha)) {
1619 rval = ha->isp_ops->load_risc(vha, &srisc_address); 1630 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1620 if (rval == QLA_SUCCESS) { 1631 if (rval == QLA_SUCCESS) {
1621 qla2x00_stop_firmware(vha); 1632 qla2x00_stop_firmware(vha);
@@ -1651,7 +1662,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
1651 if (rval == QLA_SUCCESS) { 1662 if (rval == QLA_SUCCESS) {
1652enable_82xx_npiv: 1663enable_82xx_npiv:
1653 fw_major_version = ha->fw_major_version; 1664 fw_major_version = ha->fw_major_version;
1654 if (IS_QLA82XX(ha)) 1665 if (IS_P3P_TYPE(ha))
1655 qla82xx_check_md_needed(vha); 1666 qla82xx_check_md_needed(vha);
1656 else 1667 else
1657 rval = qla2x00_get_fw_version(vha); 1668 rval = qla2x00_get_fw_version(vha);
@@ -1681,7 +1692,7 @@ enable_82xx_npiv:
1681 goto failed; 1692 goto failed;
1682 1693
1683 if (!fw_major_version && ql2xallocfwdump 1694 if (!fw_major_version && ql2xallocfwdump
1684 && !IS_QLA82XX(ha)) 1695 && !(IS_P3P_TYPE(ha)))
1685 qla2x00_alloc_fw_dump(vha); 1696 qla2x00_alloc_fw_dump(vha);
1686 } 1697 }
1687 } else { 1698 } else {
@@ -1849,7 +1860,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
1849 int rval; 1860 int rval;
1850 struct qla_hw_data *ha = vha->hw; 1861 struct qla_hw_data *ha = vha->hw;
1851 1862
1852 if (IS_QLA82XX(ha)) 1863 if (IS_P3P_TYPE(ha))
1853 return; 1864 return;
1854 1865
1855 /* Update Serial Link options. */ 1866 /* Update Serial Link options. */
@@ -3061,22 +3072,13 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3061 mb); 3072 mb);
3062 if (rval != QLA_SUCCESS) { 3073 if (rval != QLA_SUCCESS) {
3063 ql_dbg(ql_dbg_disc, vha, 0x2004, 3074 ql_dbg(ql_dbg_disc, vha, 0x2004,
3064 "Unable to adjust iIDMA " 3075 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
3065 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x " 3076 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
3066 "%04x.\n", fcport->port_name[0], fcport->port_name[1],
3067 fcport->port_name[2], fcport->port_name[3],
3068 fcport->port_name[4], fcport->port_name[5],
3069 fcport->port_name[6], fcport->port_name[7], rval,
3070 fcport->fp_speed, mb[0], mb[1]);
3071 } else { 3077 } else {
3072 ql_dbg(ql_dbg_disc, vha, 0x2005, 3078 ql_dbg(ql_dbg_disc, vha, 0x2005,
3073 "iIDMA adjusted to %s GB/s " 3079 "iIDMA adjusted to %s GB/s on %8phN.\n",
3074 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n",
3075 qla2x00_get_link_speed_str(ha, fcport->fp_speed), 3080 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
3076 fcport->port_name[0], fcport->port_name[1], 3081 fcport->port_name);
3077 fcport->port_name[2], fcport->port_name[3],
3078 fcport->port_name[4], fcport->port_name[5],
3079 fcport->port_name[6], fcport->port_name[7]);
3080 } 3082 }
3081} 3083}
3082 3084
@@ -4007,10 +4009,18 @@ qla83xx_reset_ownership(scsi_qla_host_t *vha)
4007 uint32_t class_type_mask = 0x3; 4009 uint32_t class_type_mask = 0x3;
4008 uint16_t fcoe_other_function = 0xffff, i; 4010 uint16_t fcoe_other_function = 0xffff, i;
4009 4011
4010 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); 4012 if (IS_QLA8044(ha)) {
4011 4013 drv_presence = qla8044_rd_direct(vha,
4012 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1); 4014 QLA8044_CRB_DRV_ACTIVE_INDEX);
4013 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2); 4015 dev_part_info1 = qla8044_rd_direct(vha,
4016 QLA8044_CRB_DEV_PART_INFO_INDEX);
4017 dev_part_info2 = qla8044_rd_direct(vha,
4018 QLA8044_CRB_DEV_PART_INFO2);
4019 } else {
4020 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4021 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
4022 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
4023 }
4014 for (i = 0; i < 8; i++) { 4024 for (i = 0; i < 8; i++) {
4015 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask); 4025 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
4016 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) && 4026 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
@@ -4347,7 +4357,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4347 /* For ISP82XX, driver waits for completion of the commands. 4357 /* For ISP82XX, driver waits for completion of the commands.
4348 * online flag should be set. 4358 * online flag should be set.
4349 */ 4359 */
4350 if (!IS_QLA82XX(ha)) 4360 if (!(IS_P3P_TYPE(ha)))
4351 vha->flags.online = 0; 4361 vha->flags.online = 0;
4352 ha->flags.chip_reset_done = 0; 4362 ha->flags.chip_reset_done = 0;
4353 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 4363 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -4360,7 +4370,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4360 * Driver waits for the completion of the commands. 4370 * Driver waits for the completion of the commands.
4361 * the interrupts need to be enabled. 4371 * the interrupts need to be enabled.
4362 */ 4372 */
4363 if (!IS_QLA82XX(ha)) 4373 if (!(IS_P3P_TYPE(ha)))
4364 ha->isp_ops->reset_chip(vha); 4374 ha->isp_ops->reset_chip(vha);
4365 4375
4366 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 4376 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -4403,7 +4413,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4403 4413
4404 if (!ha->flags.eeh_busy) { 4414 if (!ha->flags.eeh_busy) {
4405 /* Make sure for ISP 82XX IO DMA is complete */ 4415 /* Make sure for ISP 82XX IO DMA is complete */
4406 if (IS_QLA82XX(ha)) { 4416 if (IS_P3P_TYPE(ha)) {
4407 qla82xx_chip_reset_cleanup(vha); 4417 qla82xx_chip_reset_cleanup(vha);
4408 ql_log(ql_log_info, vha, 0x00b4, 4418 ql_log(ql_log_info, vha, 0x00b4,
4409 "Done chip reset cleanup.\n"); 4419 "Done chip reset cleanup.\n");
@@ -4723,7 +4733,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
4723 struct qla_hw_data *ha = vha->hw; 4733 struct qla_hw_data *ha = vha->hw;
4724 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 4734 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4725 4735
4726 if (IS_QLA82XX(ha)) 4736 if (IS_P3P_TYPE(ha))
4727 return; 4737 return;
4728 4738
4729 vha->flags.online = 0; 4739 vha->flags.online = 0;
@@ -4789,8 +4799,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4789 } 4799 }
4790 ha->nvram_size = sizeof(struct nvram_24xx); 4800 ha->nvram_size = sizeof(struct nvram_24xx);
4791 ha->vpd_size = FA_NVRAM_VPD_SIZE; 4801 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4792 if (IS_QLA82XX(ha))
4793 ha->vpd_size = FA_VPD_SIZE_82XX;
4794 4802
4795 /* Get VPD data into cache */ 4803 /* Get VPD data into cache */
4796 ha->vpd = ha->nvram + VPD_OFFSET; 4804 ha->vpd = ha->nvram + VPD_OFFSET;
@@ -5552,6 +5560,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5552 /* Determine NVRAM starting address. */ 5560 /* Determine NVRAM starting address. */
5553 ha->nvram_size = sizeof(struct nvram_81xx); 5561 ha->nvram_size = sizeof(struct nvram_81xx);
5554 ha->vpd_size = FA_NVRAM_VPD_SIZE; 5562 ha->vpd_size = FA_NVRAM_VPD_SIZE;
5563 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
5564 ha->vpd_size = FA_VPD_SIZE_82XX;
5555 5565
5556 /* Get VPD data into cache */ 5566 /* Get VPD data into cache */
5557 ha->vpd = ha->nvram + VPD_OFFSET; 5567 ha->vpd = ha->nvram + VPD_OFFSET;
@@ -5734,7 +5744,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5734 5744
5735 /* Link Down Timeout = 0: 5745 /* Link Down Timeout = 0:
5736 * 5746 *
5737 * When Port Down timer expires we will start returning 5747 * When Port Down timer expires we will start returning
5738 * I/O's to OS with "DID_NO_CONNECT". 5748 * I/O's to OS with "DID_NO_CONNECT".
5739 * 5749 *
5740 * Link Down Timeout != 0: 5750 * Link Down Timeout != 0:
@@ -6061,7 +6071,7 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
6061 if (priority < 0) 6071 if (priority < 0)
6062 return QLA_FUNCTION_FAILED; 6072 return QLA_FUNCTION_FAILED;
6063 6073
6064 if (IS_QLA82XX(vha->hw)) { 6074 if (IS_P3P_TYPE(vha->hw)) {
6065 fcport->fcp_prio = priority & 0xf; 6075 fcport->fcp_prio = priority & 0xf;
6066 return QLA_SUCCESS; 6076 return QLA_SUCCESS;
6067 } 6077 }
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 28c38b4929ce..957088b04611 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -59,7 +59,7 @@ qla2x00_poll(struct rsp_que *rsp)
59 unsigned long flags; 59 unsigned long flags;
60 struct qla_hw_data *ha = rsp->hw; 60 struct qla_hw_data *ha = rsp->hw;
61 local_irq_save(flags); 61 local_irq_save(flags);
62 if (IS_QLA82XX(ha)) 62 if (IS_P3P_TYPE(ha))
63 qla82xx_poll(0, rsp); 63 qla82xx_poll(0, rsp);
64 else 64 else
65 ha->isp_ops->intr_handler(0, rsp); 65 ha->isp_ops->intr_handler(0, rsp);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index ef0a5481b9dd..46b9307e8be4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -32,9 +32,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
32 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 32 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
33 cflags = CF_WRITE; 33 cflags = CF_WRITE;
34 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 34 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
35 vha->qla_stats.output_requests++;
35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 36 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 cflags = CF_READ; 37 cflags = CF_READ;
37 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 38 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
39 vha->qla_stats.input_requests++;
38 } 40 }
39 return (cflags); 41 return (cflags);
40} 42}
@@ -474,7 +476,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
474 struct qla_hw_data *ha = vha->hw; 476 struct qla_hw_data *ha = vha->hw;
475 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); 477 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
476 478
477 if (IS_QLA82XX(ha)) { 479 if (IS_P3P_TYPE(ha)) {
478 qla82xx_start_iocbs(vha); 480 qla82xx_start_iocbs(vha);
479 } else { 481 } else {
480 /* Adjust ring index. */ 482 /* Adjust ring index. */
@@ -642,10 +644,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
642 cmd_pkt->control_flags = 644 cmd_pkt->control_flags =
643 __constant_cpu_to_le16(CF_WRITE_DATA); 645 __constant_cpu_to_le16(CF_WRITE_DATA);
644 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 646 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
647 vha->qla_stats.output_requests++;
645 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 648 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
646 cmd_pkt->control_flags = 649 cmd_pkt->control_flags =
647 __constant_cpu_to_le16(CF_READ_DATA); 650 __constant_cpu_to_le16(CF_READ_DATA);
648 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 651 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
652 vha->qla_stats.input_requests++;
649 } 653 }
650 654
651 cur_seg = scsi_sglist(cmd); 655 cur_seg = scsi_sglist(cmd);
@@ -758,10 +762,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
758 cmd_pkt->task_mgmt_flags = 762 cmd_pkt->task_mgmt_flags =
759 __constant_cpu_to_le16(TMF_WRITE_DATA); 763 __constant_cpu_to_le16(TMF_WRITE_DATA);
760 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 764 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
765 vha->qla_stats.output_requests++;
761 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 766 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
762 cmd_pkt->task_mgmt_flags = 767 cmd_pkt->task_mgmt_flags =
763 __constant_cpu_to_le16(TMF_READ_DATA); 768 __constant_cpu_to_le16(TMF_READ_DATA);
764 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 769 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
770 vha->qla_stats.input_requests++;
765 } 771 }
766 772
767 /* One DSD is available in the Command Type 3 IOCB */ 773 /* One DSD is available in the Command Type 3 IOCB */
@@ -1844,7 +1850,7 @@ skip_cmd_array:
1844 if (req->cnt < req_cnt) { 1850 if (req->cnt < req_cnt) {
1845 if (ha->mqenable || IS_QLA83XX(ha)) 1851 if (ha->mqenable || IS_QLA83XX(ha))
1846 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); 1852 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1847 else if (IS_QLA82XX(ha)) 1853 else if (IS_P3P_TYPE(ha))
1848 cnt = RD_REG_DWORD(&reg->isp82.req_q_out); 1854 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1849 else if (IS_FWI2_CAPABLE(ha)) 1855 else if (IS_FWI2_CAPABLE(ha))
1850 cnt = RD_REG_DWORD(&reg->isp24.req_q_out); 1856 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
@@ -2056,6 +2062,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2056 (bsg_job->reply_payload.sg_list))); 2062 (bsg_job->reply_payload.sg_list)));
2057 els_iocb->rx_len = cpu_to_le32(sg_dma_len 2063 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2058 (bsg_job->reply_payload.sg_list)); 2064 (bsg_job->reply_payload.sg_list));
2065
2066 sp->fcport->vha->qla_stats.control_requests++;
2059} 2067}
2060 2068
2061static void 2069static void
@@ -2133,6 +2141,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2133 avail_dsds--; 2141 avail_dsds--;
2134 } 2142 }
2135 ct_iocb->entry_count = entry_count; 2143 ct_iocb->entry_count = entry_count;
2144
2145 sp->fcport->vha->qla_stats.control_requests++;
2136} 2146}
2137 2147
2138static void 2148static void
@@ -2685,6 +2695,9 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2685 vha->bidi_stats.transfer_bytes += req_data_len; 2695 vha->bidi_stats.transfer_bytes += req_data_len;
2686 vha->bidi_stats.io_count++; 2696 vha->bidi_stats.io_count++;
2687 2697
2698 vha->qla_stats.output_bytes += req_data_len;
2699 vha->qla_stats.output_requests++;
2700
2688 /* Only one dsd is available for bidirectional IOCB, remaining dsds 2701 /* Only one dsd is available for bidirectional IOCB, remaining dsds
2689 * are bundled in continuation iocb 2702 * are bundled in continuation iocb
2690 */ 2703 */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2d8e7b812352..df1b30ba938c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -282,25 +282,38 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
282 "%04x %04x %04x %04x %04x %04x %04x.\n", 282 "%04x %04x %04x %04x %04x %04x %04x.\n",
283 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 283 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
284 mb[4], mb[5], mb[6]); 284 mb[4], mb[5], mb[6]);
285 if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) { 285 switch (aen) {
286 vha->hw->flags.idc_compl_status = 1; 286 /* Handle IDC Error completion case. */
287 if (vha->hw->notify_dcbx_comp) 287 case MBA_IDC_COMPLETE:
288 complete(&vha->hw->dcbx_comp); 288 if (mb[1] >> 15) {
289 } 289 vha->hw->flags.idc_compl_status = 1;
290 290 if (vha->hw->notify_dcbx_comp)
291 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 291 complete(&vha->hw->dcbx_comp);
292 timeout = (descr >> 8) & 0xf; 292 }
293 if (aen != MBA_IDC_NOTIFY || !timeout) 293 break;
294 return;
295 294
296 ql_dbg(ql_dbg_async, vha, 0x5022, 295 case MBA_IDC_NOTIFY:
297 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 296 /* Acknowledgement needed? [Notify && non-zero timeout]. */
298 vha->host_no, event[aen & 0xff], timeout); 297 timeout = (descr >> 8) & 0xf;
298 ql_dbg(ql_dbg_async, vha, 0x5022,
299 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
300 vha->host_no, event[aen & 0xff], timeout);
299 301
300 rval = qla2x00_post_idc_ack_work(vha, mb); 302 if (!timeout)
301 if (rval != QLA_SUCCESS) 303 return;
302 ql_log(ql_log_warn, vha, 0x5023, 304 rval = qla2x00_post_idc_ack_work(vha, mb);
303 "IDC failed to post ACK.\n"); 305 if (rval != QLA_SUCCESS)
306 ql_log(ql_log_warn, vha, 0x5023,
307 "IDC failed to post ACK.\n");
308 break;
309 case MBA_IDC_TIME_EXT:
310 vha->hw->idc_extend_tmo = descr;
311 ql_dbg(ql_dbg_async, vha, 0x5087,
312 "%lu Inter-Driver Communication %s -- "
313 "Extend timeout by=%d.\n",
314 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
315 break;
316 }
304} 317}
305 318
306#define LS_UNKNOWN 2 319#define LS_UNKNOWN 2
@@ -691,7 +704,8 @@ skip_rio:
691 case MBA_LOOP_DOWN: /* Loop Down Event */ 704 case MBA_LOOP_DOWN: /* Loop Down Event */
692 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 705 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
693 ? RD_REG_WORD(&reg24->mailbox4) : 0; 706 ? RD_REG_WORD(&reg24->mailbox4) : 0;
694 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; 707 mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
708 : mbx;
695 ql_dbg(ql_dbg_async, vha, 0x500b, 709 ql_dbg(ql_dbg_async, vha, 0x500b,
696 "LOOP DOWN detected (%x %x %x %x).\n", 710 "LOOP DOWN detected (%x %x %x %x).\n",
697 mb[1], mb[2], mb[3], mbx); 711 mb[1], mb[2], mb[3], mbx);
@@ -740,7 +754,7 @@ skip_rio:
740 if (IS_QLA2100(ha)) 754 if (IS_QLA2100(ha))
741 break; 755 break;
742 756
743 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) { 757 if (IS_CNA_CAPABLE(ha)) {
744 ql_dbg(ql_dbg_async, vha, 0x500d, 758 ql_dbg(ql_dbg_async, vha, 0x500d,
745 "DCBX Completed -- %04x %04x %04x.\n", 759 "DCBX Completed -- %04x %04x %04x.\n",
746 mb[1], mb[2], mb[3]); 760 mb[1], mb[2], mb[3]);
@@ -1002,7 +1016,7 @@ skip_rio:
1002 mb[1], mb[2], mb[3]); 1016 mb[1], mb[2], mb[3]);
1003 break; 1017 break;
1004 case MBA_IDC_NOTIFY: 1018 case MBA_IDC_NOTIFY:
1005 if (IS_QLA8031(vha->hw)) { 1019 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1006 mb[4] = RD_REG_WORD(&reg24->mailbox4); 1020 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1007 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1021 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1008 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1022 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
@@ -1022,7 +1036,8 @@ skip_rio:
1022 complete(&ha->lb_portup_comp); 1036 complete(&ha->lb_portup_comp);
1023 /* Fallthru */ 1037 /* Fallthru */
1024 case MBA_IDC_TIME_EXT: 1038 case MBA_IDC_TIME_EXT:
1025 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) 1039 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1040 IS_QLA8044(ha))
1026 qla81xx_idc_event(vha, mb[0], mb[1]); 1041 qla81xx_idc_event(vha, mb[0], mb[1]);
1027 break; 1042 break;
1028 1043
@@ -1063,7 +1078,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
1063 ql_log(ql_log_warn, vha, 0x3014, 1078 ql_log(ql_log_warn, vha, 0x3014,
1064 "Invalid SCSI command index (%x).\n", index); 1079 "Invalid SCSI command index (%x).\n", index);
1065 1080
1066 if (IS_QLA82XX(ha)) 1081 if (IS_P3P_TYPE(ha))
1067 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1082 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1068 else 1083 else
1069 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1084 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1080,7 +1095,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
1080 } else { 1095 } else {
1081 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1096 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1082 1097
1083 if (IS_QLA82XX(ha)) 1098 if (IS_P3P_TYPE(ha))
1084 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1099 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1085 else 1100 else
1086 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1101 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1100,7 +1115,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1100 if (index >= req->num_outstanding_cmds) { 1115 if (index >= req->num_outstanding_cmds) {
1101 ql_log(ql_log_warn, vha, 0x5031, 1116 ql_log(ql_log_warn, vha, 0x5031,
1102 "Invalid command index (%x).\n", index); 1117 "Invalid command index (%x).\n", index);
1103 if (IS_QLA82XX(ha)) 1118 if (IS_P3P_TYPE(ha))
1104 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1119 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1105 else 1120 else
1106 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1121 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1805,6 +1820,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1805 if (scsi_status == 0) { 1820 if (scsi_status == 0) {
1806 bsg_job->reply->reply_payload_rcv_len = 1821 bsg_job->reply->reply_payload_rcv_len =
1807 bsg_job->reply_payload.payload_len; 1822 bsg_job->reply_payload.payload_len;
1823 vha->qla_stats.input_bytes +=
1824 bsg_job->reply->reply_payload_rcv_len;
1825 vha->qla_stats.input_requests++;
1808 rval = EXT_STATUS_OK; 1826 rval = EXT_STATUS_OK;
1809 } 1827 }
1810 goto done; 1828 goto done;
@@ -1949,7 +1967,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1949 ql_dbg(ql_dbg_io, vha, 0x3017, 1967 ql_dbg(ql_dbg_io, vha, 0x3017,
1950 "Invalid status handle (0x%x).\n", sts->handle); 1968 "Invalid status handle (0x%x).\n", sts->handle);
1951 1969
1952 if (IS_QLA82XX(ha)) 1970 if (IS_P3P_TYPE(ha))
1953 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1971 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1954 else 1972 else
1955 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1973 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -2176,8 +2194,10 @@ check_scsi_status:
2176 } 2194 }
2177 2195
2178 ql_dbg(ql_dbg_io, fcport->vha, 0x3021, 2196 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
2179 "Port down status: port-state=0x%x.\n", 2197 "Port to be marked lost on fcport=%02x%02x%02x, current "
2180 atomic_read(&fcport->state)); 2198 "port state= %s.\n", fcport->d_id.b.domain,
2199 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2200 port_state_str[atomic_read(&fcport->state)]);
2181 2201
2182 if (atomic_read(&fcport->state) == FCS_ONLINE) 2202 if (atomic_read(&fcport->state) == FCS_ONLINE)
2183 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 2203 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -2212,16 +2232,13 @@ check_scsi_status:
2212out: 2232out:
2213 if (logit) 2233 if (logit)
2214 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 2234 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2215 "FCP command status: 0x%x-0x%x (0x%x) " 2235 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
2216 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x " 2236 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
2217 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
2218 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 2237 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
2219 comp_status, scsi_status, res, vha->host_no, 2238 comp_status, scsi_status, res, vha->host_no,
2220 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 2239 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2221 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 2240 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2222 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3], 2241 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2223 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
2224 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
2225 resid_len, fw_resid_len); 2242 resid_len, fw_resid_len);
2226 2243
2227 if (!res) 2244 if (!res)
@@ -2324,7 +2341,7 @@ fatal:
2324 ql_log(ql_log_warn, vha, 0x5030, 2341 ql_log(ql_log_warn, vha, 0x5030,
2325 "Error entry - invalid handle/queue.\n"); 2342 "Error entry - invalid handle/queue.\n");
2326 2343
2327 if (IS_QLA82XX(ha)) 2344 if (IS_P3P_TYPE(ha))
2328 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2345 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2329 else 2346 else
2330 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2347 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -2452,7 +2469,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2452 } 2469 }
2453 2470
2454 /* Adjust ring index */ 2471 /* Adjust ring index */
2455 if (IS_QLA82XX(ha)) { 2472 if (IS_P3P_TYPE(ha)) {
2456 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 2473 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2457 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index); 2474 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2458 } else 2475 } else
@@ -2865,7 +2882,7 @@ msix_failed:
2865 ret = request_irq(qentry->vector, 2882 ret = request_irq(qentry->vector,
2866 qla83xx_msix_entries[i].handler, 2883 qla83xx_msix_entries[i].handler,
2867 0, qla83xx_msix_entries[i].name, rsp); 2884 0, qla83xx_msix_entries[i].name, rsp);
2868 } else if (IS_QLA82XX(ha)) { 2885 } else if (IS_P3P_TYPE(ha)) {
2869 ret = request_irq(qentry->vector, 2886 ret = request_irq(qentry->vector,
2870 qla82xx_msix_entries[i].handler, 2887 qla82xx_msix_entries[i].handler,
2871 0, qla82xx_msix_entries[i].name, rsp); 2888 0, qla82xx_msix_entries[i].name, rsp);
@@ -2950,7 +2967,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2950skip_msix: 2967skip_msix:
2951 2968
2952 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2969 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2953 !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha)) 2970 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha))
2954 goto skip_msi; 2971 goto skip_msi;
2955 2972
2956 ret = pci_enable_msi(ha->pdev); 2973 ret = pci_enable_msi(ha->pdev);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7257c3c4f2d0..a9aae500e791 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -75,7 +75,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
75 return QLA_FUNCTION_TIMEOUT; 75 return QLA_FUNCTION_TIMEOUT;
76 } 76 }
77 77
78 if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) { 78 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
79 /* Setting Link-Down error */ 79 /* Setting Link-Down error */
80 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 80 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
81 ql_log(ql_log_warn, vha, 0x1004, 81 ql_log(ql_log_warn, vha, 0x1004,
@@ -106,9 +106,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
106 spin_lock_irqsave(&ha->hardware_lock, flags); 106 spin_lock_irqsave(&ha->hardware_lock, flags);
107 107
108 /* Load mailbox registers. */ 108 /* Load mailbox registers. */
109 if (IS_QLA82XX(ha)) 109 if (IS_P3P_TYPE(ha))
110 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0]; 110 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
111 else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha)) 111 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
112 optr = (uint16_t __iomem *)&reg->isp24.mailbox0; 112 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
113 else 113 else
114 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0); 114 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -117,33 +117,25 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
117 command = mcp->mb[0]; 117 command = mcp->mb[0];
118 mboxes = mcp->out_mb; 118 mboxes = mcp->out_mb;
119 119
120 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
121 "Mailbox registers (OUT):\n");
120 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 122 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
121 if (IS_QLA2200(ha) && cnt == 8) 123 if (IS_QLA2200(ha) && cnt == 8)
122 optr = 124 optr =
123 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8); 125 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
124 if (mboxes & BIT_0) 126 if (mboxes & BIT_0) {
127 ql_dbg(ql_dbg_mbx, vha, 0x1112,
128 "mbox[%d]<-0x%04x\n", cnt, *iptr);
125 WRT_REG_WORD(optr, *iptr); 129 WRT_REG_WORD(optr, *iptr);
130 }
126 131
127 mboxes >>= 1; 132 mboxes >>= 1;
128 optr++; 133 optr++;
129 iptr++; 134 iptr++;
130 } 135 }
131 136
132 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
133 "Loaded MBX registers (displayed in bytes) =.\n");
134 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112,
135 (uint8_t *)mcp->mb, 16);
136 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113,
137 ".\n");
138 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114,
139 ((uint8_t *)mcp->mb + 0x10), 16);
140 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115,
141 ".\n");
142 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116,
143 ((uint8_t *)mcp->mb + 0x20), 8);
144 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, 137 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
145 "I/O Address = %p.\n", optr); 138 "I/O Address = %p.\n", optr);
146 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e);
147 139
148 /* Issue set host interrupt command to send cmd out. */ 140 /* Issue set host interrupt command to send cmd out. */
149 ha->flags.mbox_int = 0; 141 ha->flags.mbox_int = 0;
@@ -159,7 +151,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
159 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { 151 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
160 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 152 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
161 153
162 if (IS_QLA82XX(ha)) { 154 if (IS_P3P_TYPE(ha)) {
163 if (RD_REG_DWORD(&reg->isp82.hint) & 155 if (RD_REG_DWORD(&reg->isp82.hint) &
164 HINT_MBX_INT_PENDING) { 156 HINT_MBX_INT_PENDING) {
165 spin_unlock_irqrestore(&ha->hardware_lock, 157 spin_unlock_irqrestore(&ha->hardware_lock,
@@ -189,7 +181,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
189 ql_dbg(ql_dbg_mbx, vha, 0x1011, 181 ql_dbg(ql_dbg_mbx, vha, 0x1011,
190 "Cmd=%x Polling Mode.\n", command); 182 "Cmd=%x Polling Mode.\n", command);
191 183
192 if (IS_QLA82XX(ha)) { 184 if (IS_P3P_TYPE(ha)) {
193 if (RD_REG_DWORD(&reg->isp82.hint) & 185 if (RD_REG_DWORD(&reg->isp82.hint) &
194 HINT_MBX_INT_PENDING) { 186 HINT_MBX_INT_PENDING) {
195 spin_unlock_irqrestore(&ha->hardware_lock, 187 spin_unlock_irqrestore(&ha->hardware_lock,
@@ -236,7 +228,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
236 ha->flags.mbox_int = 0; 228 ha->flags.mbox_int = 0;
237 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 229 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
238 230
239 if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) { 231 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
240 ha->flags.mbox_busy = 0; 232 ha->flags.mbox_busy = 0;
241 /* Setting Link-Down error */ 233 /* Setting Link-Down error */
242 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 234 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -254,9 +246,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
254 iptr2 = mcp->mb; 246 iptr2 = mcp->mb;
255 iptr = (uint16_t *)&ha->mailbox_out[0]; 247 iptr = (uint16_t *)&ha->mailbox_out[0];
256 mboxes = mcp->in_mb; 248 mboxes = mcp->in_mb;
249
250 ql_dbg(ql_dbg_mbx, vha, 0x1113,
251 "Mailbox registers (IN):\n");
257 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 252 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
258 if (mboxes & BIT_0) 253 if (mboxes & BIT_0) {
259 *iptr2 = *iptr; 254 *iptr2 = *iptr;
255 ql_dbg(ql_dbg_mbx, vha, 0x1114,
256 "mbox[%d]->0x%04x\n", cnt, *iptr2);
257 }
260 258
261 mboxes >>= 1; 259 mboxes >>= 1;
262 iptr2++; 260 iptr2++;
@@ -537,7 +535,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
537 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 535 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
538 mcp->out_mb = MBX_0; 536 mcp->out_mb = MBX_0;
539 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 537 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
540 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha)) 538 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
541 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; 539 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
542 if (IS_FWI2_CAPABLE(ha)) 540 if (IS_FWI2_CAPABLE(ha))
543 mcp->in_mb |= MBX_17|MBX_16|MBX_15; 541 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
@@ -556,7 +554,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
556 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */ 554 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
557 else 555 else
558 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; 556 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
559 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) { 557 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
560 ha->mpi_version[0] = mcp->mb[10] & 0xff; 558 ha->mpi_version[0] = mcp->mb[10] & 0xff;
561 ha->mpi_version[1] = mcp->mb[11] >> 8; 559 ha->mpi_version[1] = mcp->mb[11] >> 8;
562 ha->mpi_version[2] = mcp->mb[11] & 0xff; 560 ha->mpi_version[2] = mcp->mb[11] & 0xff;
@@ -1201,7 +1199,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1201 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, 1199 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1202 "Entered %s.\n", __func__); 1200 "Entered %s.\n", __func__);
1203 1201
1204 if (IS_QLA82XX(ha) && ql2xdbwr) 1202 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1205 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, 1203 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
1206 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); 1204 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1207 1205
@@ -1667,7 +1665,11 @@ qla24xx_link_initialize(scsi_qla_host_t *vha)
1667 return QLA_FUNCTION_FAILED; 1665 return QLA_FUNCTION_FAILED;
1668 1666
1669 mcp->mb[0] = MBC_LINK_INITIALIZATION; 1667 mcp->mb[0] = MBC_LINK_INITIALIZATION;
1670 mcp->mb[1] = BIT_6|BIT_4; 1668 mcp->mb[1] = BIT_4;
1669 if (vha->hw->operating_mode == LOOP)
1670 mcp->mb[1] |= BIT_6;
1671 else
1672 mcp->mb[1] |= BIT_5;
1671 mcp->mb[2] = 0; 1673 mcp->mb[2] = 0;
1672 mcp->mb[3] = 0; 1674 mcp->mb[3] = 0;
1673 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1675 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3574,7 +3576,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3574 unsigned long flags; 3576 unsigned long flags;
3575 mbx_cmd_t mc; 3577 mbx_cmd_t mc;
3576 mbx_cmd_t *mcp = &mc; 3578 mbx_cmd_t *mcp = &mc;
3577 struct device_reg_25xxmq __iomem *reg;
3578 struct qla_hw_data *ha = vha->hw; 3579 struct qla_hw_data *ha = vha->hw;
3579 3580
3580 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 3581 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
@@ -3595,9 +3596,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3595 if (IS_QLA83XX(ha)) 3596 if (IS_QLA83XX(ha))
3596 mcp->mb[15] = 0; 3597 mcp->mb[15] = 0;
3597 3598
3598 reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
3599 QLA_QUE_PAGE * req->id);
3600
3601 mcp->mb[4] = req->id; 3599 mcp->mb[4] = req->id;
3602 /* que in ptr index */ 3600 /* que in ptr index */
3603 mcp->mb[8] = 0; 3601 mcp->mb[8] = 0;
@@ -3619,12 +3617,10 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3619 3617
3620 spin_lock_irqsave(&ha->hardware_lock, flags); 3618 spin_lock_irqsave(&ha->hardware_lock, flags);
3621 if (!(req->options & BIT_0)) { 3619 if (!(req->options & BIT_0)) {
3622 WRT_REG_DWORD(&reg->req_q_in, 0); 3620 WRT_REG_DWORD(req->req_q_in, 0);
3623 if (!IS_QLA83XX(ha)) 3621 if (!IS_QLA83XX(ha))
3624 WRT_REG_DWORD(&reg->req_q_out, 0); 3622 WRT_REG_DWORD(req->req_q_out, 0);
3625 } 3623 }
3626 req->req_q_in = &reg->req_q_in;
3627 req->req_q_out = &reg->req_q_out;
3628 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3624 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3629 3625
3630 rval = qla2x00_mailbox_command(vha, mcp); 3626 rval = qla2x00_mailbox_command(vha, mcp);
@@ -3646,7 +3642,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3646 unsigned long flags; 3642 unsigned long flags;
3647 mbx_cmd_t mc; 3643 mbx_cmd_t mc;
3648 mbx_cmd_t *mcp = &mc; 3644 mbx_cmd_t *mcp = &mc;
3649 struct device_reg_25xxmq __iomem *reg;
3650 struct qla_hw_data *ha = vha->hw; 3645 struct qla_hw_data *ha = vha->hw;
3651 3646
3652 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 3647 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
@@ -3664,9 +3659,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3664 if (IS_QLA83XX(ha)) 3659 if (IS_QLA83XX(ha))
3665 mcp->mb[15] = 0; 3660 mcp->mb[15] = 0;
3666 3661
3667 reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
3668 QLA_QUE_PAGE * rsp->id);
3669
3670 mcp->mb[4] = rsp->id; 3662 mcp->mb[4] = rsp->id;
3671 /* que in ptr index */ 3663 /* que in ptr index */
3672 mcp->mb[8] = 0; 3664 mcp->mb[8] = 0;
@@ -3690,9 +3682,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3690 3682
3691 spin_lock_irqsave(&ha->hardware_lock, flags); 3683 spin_lock_irqsave(&ha->hardware_lock, flags);
3692 if (!(rsp->options & BIT_0)) { 3684 if (!(rsp->options & BIT_0)) {
3693 WRT_REG_DWORD(&reg->rsp_q_out, 0); 3685 WRT_REG_DWORD(rsp->rsp_q_out, 0);
3694 if (!IS_QLA83XX(ha)) 3686 if (!IS_QLA83XX(ha))
3695 WRT_REG_DWORD(&reg->rsp_q_in, 0); 3687 WRT_REG_DWORD(rsp->rsp_q_in, 0);
3696 } 3688 }
3697 3689
3698 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3690 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3872,6 +3864,112 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3872 return rval; 3864 return rval;
3873} 3865}
3874 3866
3867int
3868qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
3869{
3870 int rval;
3871 mbx_cmd_t mc;
3872 mbx_cmd_t *mcp = &mc;
3873 int i;
3874 int len;
3875 uint16_t *str;
3876 struct qla_hw_data *ha = vha->hw;
3877
3878 if (!IS_P3P_TYPE(ha))
3879 return QLA_FUNCTION_FAILED;
3880
3881 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
3882 "Entered %s.\n", __func__);
3883
3884 str = (void *)version;
3885 len = strlen(version);
3886
3887 mcp->mb[0] = MBC_SET_RNID_PARAMS;
3888 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
3889 mcp->out_mb = MBX_1|MBX_0;
3890 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
3891 mcp->mb[i] = cpu_to_le16p(str);
3892 mcp->out_mb |= 1<<i;
3893 }
3894 for (; i < 16; i++) {
3895 mcp->mb[i] = 0;
3896 mcp->out_mb |= 1<<i;
3897 }
3898 mcp->in_mb = MBX_1|MBX_0;
3899 mcp->tov = MBX_TOV_SECONDS;
3900 mcp->flags = 0;
3901 rval = qla2x00_mailbox_command(vha, mcp);
3902
3903 if (rval != QLA_SUCCESS) {
3904 ql_dbg(ql_dbg_mbx, vha, 0x117c,
3905 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
3906 } else {
3907 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
3908 "Done %s.\n", __func__);
3909 }
3910
3911 return rval;
3912}
3913
3914int
3915qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
3916{
3917 int rval;
3918 mbx_cmd_t mc;
3919 mbx_cmd_t *mcp = &mc;
3920 int len;
3921 uint16_t dwlen;
3922 uint8_t *str;
3923 dma_addr_t str_dma;
3924 struct qla_hw_data *ha = vha->hw;
3925
3926 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
3927 IS_P3P_TYPE(ha))
3928 return QLA_FUNCTION_FAILED;
3929
3930 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
3931 "Entered %s.\n", __func__);
3932
3933 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
3934 if (!str) {
3935 ql_log(ql_log_warn, vha, 0x117f,
3936 "Failed to allocate driver version param.\n");
3937 return QLA_MEMORY_ALLOC_FAILED;
3938 }
3939
3940 memcpy(str, "\x7\x3\x11\x0", 4);
3941 dwlen = str[0];
3942 len = dwlen * 4 - 4;
3943 memset(str + 4, 0, len);
3944 if (len > strlen(version))
3945 len = strlen(version);
3946 memcpy(str + 4, version, len);
3947
3948 mcp->mb[0] = MBC_SET_RNID_PARAMS;
3949 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
3950 mcp->mb[2] = MSW(LSD(str_dma));
3951 mcp->mb[3] = LSW(LSD(str_dma));
3952 mcp->mb[6] = MSW(MSD(str_dma));
3953 mcp->mb[7] = LSW(MSD(str_dma));
3954 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3955 mcp->in_mb = MBX_1|MBX_0;
3956 mcp->tov = MBX_TOV_SECONDS;
3957 mcp->flags = 0;
3958 rval = qla2x00_mailbox_command(vha, mcp);
3959
3960 if (rval != QLA_SUCCESS) {
3961 ql_dbg(ql_dbg_mbx, vha, 0x1180,
3962 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
3963 } else {
3964 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
3965 "Done %s.\n", __func__);
3966 }
3967
3968 dma_pool_free(ha->s_dma_pool, str, str_dma);
3969
3970 return rval;
3971}
3972
3875static int 3973static int
3876qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 3974qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
3877{ 3975{
@@ -4407,7 +4505,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4407 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 4505 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
4408 "Entered %s.\n", __func__); 4506 "Entered %s.\n", __func__);
4409 4507
4410 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 4508 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha))
4411 return QLA_FUNCTION_FAILED; 4509 return QLA_FUNCTION_FAILED;
4412 mcp->mb[0] = MBC_GET_PORT_CONFIG; 4510 mcp->mb[0] = MBC_GET_PORT_CONFIG;
4413 mcp->out_mb = MBX_0; 4511 mcp->out_mb = MBX_0;
@@ -4512,40 +4610,43 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
4512 struct qla_hw_data *ha = vha->hw; 4610 struct qla_hw_data *ha = vha->hw;
4513 uint8_t byte; 4611 uint8_t byte;
4514 4612
4515 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca, 4613 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
4516 "Entered %s.\n", __func__); 4614 ql_dbg(ql_dbg_mbx, vha, 0x1150,
4517 4615 "Thermal not supported by this card.\n");
4518 if (ha->thermal_support & THERMAL_SUPPORT_I2C) { 4616 return rval;
4519 rval = qla2x00_read_sfp(vha, 0, &byte,
4520 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
4521 *temp = byte;
4522 if (rval == QLA_SUCCESS)
4523 goto done;
4524
4525 ql_log(ql_log_warn, vha, 0x10c9,
4526 "Thermal not supported through I2C bus, trying alternate "
4527 "method (ISP access).\n");
4528 ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
4529 } 4617 }
4530 4618
4531 if (ha->thermal_support & THERMAL_SUPPORT_ISP) { 4619 if (IS_QLA25XX(ha)) {
4532 rval = qla2x00_read_asic_temperature(vha, temp); 4620 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4533 if (rval == QLA_SUCCESS) 4621 ha->pdev->subsystem_device == 0x0175) {
4534 goto done; 4622 rval = qla2x00_read_sfp(vha, 0, &byte,
4535 4623 0x98, 0x1, 1, BIT_13|BIT_0);
4536 ql_log(ql_log_warn, vha, 0x1019, 4624 *temp = byte;
4537 "Thermal not supported through ISP.\n"); 4625 return rval;
4538 ha->thermal_support &= ~THERMAL_SUPPORT_ISP; 4626 }
4627 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
4628 ha->pdev->subsystem_device == 0x338e) {
4629 rval = qla2x00_read_sfp(vha, 0, &byte,
4630 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
4631 *temp = byte;
4632 return rval;
4633 }
4634 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
4635 "Thermal not supported by this card.\n");
4636 return rval;
4539 } 4637 }
4540 4638
4541 ql_log(ql_log_warn, vha, 0x1150, 4639 if (IS_QLA82XX(ha)) {
4542 "Thermal not supported by this card " 4640 *temp = qla82xx_read_temperature(vha);
4543 "(ignoring further requests).\n"); 4641 rval = QLA_SUCCESS;
4544 return rval; 4642 return rval;
4643 } else if (IS_QLA8044(ha)) {
4644 *temp = qla8044_read_temperature(vha);
4645 rval = QLA_SUCCESS;
4646 return rval;
4647 }
4545 4648
4546done: 4649 rval = qla2x00_read_asic_temperature(vha, temp);
4547 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
4548 "Done %s.\n", __func__);
4549 return rval; 4650 return rval;
4550} 4651}
4551 4652
@@ -4595,7 +4696,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 4696 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
4596 "Entered %s.\n", __func__); 4697 "Entered %s.\n", __func__);
4597 4698
4598 if (!IS_QLA82XX(ha)) 4699 if (!IS_P3P_TYPE(ha))
4599 return QLA_FUNCTION_FAILED; 4700 return QLA_FUNCTION_FAILED;
4600 4701
4601 memset(mcp, 0, sizeof(mbx_cmd_t)); 4702 memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4713,6 +4814,60 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
4713} 4814}
4714 4815
4715int 4816int
4817qla8044_md_get_template(scsi_qla_host_t *vha)
4818{
4819 struct qla_hw_data *ha = vha->hw;
4820 mbx_cmd_t mc;
4821 mbx_cmd_t *mcp = &mc;
4822 int rval = QLA_FUNCTION_FAILED;
4823 int offset = 0, size = MINIDUMP_SIZE_36K;
4824 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
4825 "Entered %s.\n", __func__);
4826
4827 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4828 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
4829 if (!ha->md_tmplt_hdr) {
4830 ql_log(ql_log_warn, vha, 0xb11b,
4831 "Unable to allocate memory for Minidump template.\n");
4832 return rval;
4833 }
4834
4835 memset(mcp->mb, 0 , sizeof(mcp->mb));
4836 while (offset < ha->md_template_size) {
4837 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4838 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4839 mcp->mb[2] = LSW(RQST_TMPLT);
4840 mcp->mb[3] = MSW(RQST_TMPLT);
4841 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
4842 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
4843 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
4844 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
4845 mcp->mb[8] = LSW(size);
4846 mcp->mb[9] = MSW(size);
4847 mcp->mb[10] = offset & 0x0000FFFF;
4848 mcp->mb[11] = offset & 0xFFFF0000;
4849 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4850 mcp->tov = MBX_TOV_SECONDS;
4851 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
4852 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4853 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4854 rval = qla2x00_mailbox_command(vha, mcp);
4855
4856 if (rval != QLA_SUCCESS) {
4857 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
4858 "mailbox command FAILED=0x%x, subcode=%x.\n",
4859 ((mcp->mb[1] << 16) | mcp->mb[0]),
4860 ((mcp->mb[3] << 16) | mcp->mb[2]));
4861 return rval;
4862 } else
4863 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
4864 "Done %s.\n", __func__);
4865 offset = offset + size;
4866 }
4867 return rval;
4868}
4869
4870int
4716qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 4871qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4717{ 4872{
4718 int rval; 4873 int rval;
@@ -4808,7 +4963,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4808 mbx_cmd_t mc; 4963 mbx_cmd_t mc;
4809 mbx_cmd_t *mcp = &mc; 4964 mbx_cmd_t *mcp = &mc;
4810 4965
4811 if (!IS_QLA82XX(ha)) 4966 if (!IS_P3P_TYPE(ha))
4812 return QLA_FUNCTION_FAILED; 4967 return QLA_FUNCTION_FAILED;
4813 4968
4814 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 4969 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f868a9f98afe..a72df701fb38 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -699,6 +699,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
699 req->cnt = req->length; 699 req->cnt = req->length;
700 req->id = que_id; 700 req->id = que_id;
701 reg = ISP_QUE_REG(ha, que_id); 701 reg = ISP_QUE_REG(ha, que_id);
702 req->req_q_in = &reg->isp25mq.req_q_in;
703 req->req_q_out = &reg->isp25mq.req_q_out;
702 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 704 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
703 mutex_unlock(&ha->vport_lock); 705 mutex_unlock(&ha->vport_lock);
704 ql_dbg(ql_dbg_multiq, base_vha, 0xc004, 706 ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index d7993797f46e..2482975d72b2 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -294,7 +294,7 @@ premature_exit:
294 * Context: 294 * Context:
295 * Kernel context. 295 * Kernel context.
296 */ 296 */
297static int 297int
298qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo) 298qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
299{ 299{
300 int rval; 300 int rval;
@@ -776,6 +776,29 @@ qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
776} 776}
777 777
778int 778int
779qlafx00_loop_reset(scsi_qla_host_t *vha)
780{
781 int ret;
782 struct fc_port *fcport;
783 struct qla_hw_data *ha = vha->hw;
784
785 if (ql2xtargetreset) {
786 list_for_each_entry(fcport, &vha->vp_fcports, list) {
787 if (fcport->port_type != FCT_TARGET)
788 continue;
789
790 ret = ha->isp_ops->target_reset(fcport, 0, 0);
791 if (ret != QLA_SUCCESS) {
792 ql_dbg(ql_dbg_taskm, vha, 0x803d,
793 "Bus Reset failed: Reset=%d "
794 "d_id=%x.\n", ret, fcport->d_id.b24);
795 }
796 }
797 }
798 return QLA_SUCCESS;
799}
800
801int
779qlafx00_iospace_config(struct qla_hw_data *ha) 802qlafx00_iospace_config(struct qla_hw_data *ha)
780{ 803{
781 if (pci_request_selected_regions(ha->pdev, ha->bars, 804 if (pci_request_selected_regions(ha->pdev, ha->bars,
@@ -918,12 +941,23 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
918 struct qla_hw_data *ha = vha->hw; 941 struct qla_hw_data *ha = vha->hw;
919 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; 942 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
920 uint32_t aenmbx, aenmbx7 = 0; 943 uint32_t aenmbx, aenmbx7 = 0;
944 uint32_t pseudo_aen;
921 uint32_t state[5]; 945 uint32_t state[5];
922 bool done = false; 946 bool done = false;
923 947
924 /* 30 seconds wait - Adjust if required */ 948 /* 30 seconds wait - Adjust if required */
925 wait_time = 30; 949 wait_time = 30;
926 950
951 pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
952 if (pseudo_aen == 1) {
953 aenmbx7 = RD_REG_DWORD(&reg->initval7);
954 ha->mbx_intr_code = MSW(aenmbx7);
955 ha->rqstq_intr_code = LSW(aenmbx7);
956 rval = qlafx00_driver_shutdown(vha, 10);
957 if (rval != QLA_SUCCESS)
958 qlafx00_soft_reset(vha);
959 }
960
927 /* wait time before firmware ready */ 961 /* wait time before firmware ready */
928 wtime = jiffies + (wait_time * HZ); 962 wtime = jiffies + (wait_time * HZ);
929 do { 963 do {
@@ -1349,21 +1383,22 @@ qlafx00_configure_devices(scsi_qla_host_t *vha)
1349} 1383}
1350 1384
1351static void 1385static void
1352qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha) 1386qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
1353{ 1387{
1354 struct qla_hw_data *ha = vha->hw; 1388 struct qla_hw_data *ha = vha->hw;
1355 fc_port_t *fcport; 1389 fc_port_t *fcport;
1356 1390
1357 vha->flags.online = 0; 1391 vha->flags.online = 0;
1358 ha->flags.chip_reset_done = 0;
1359 ha->mr.fw_hbt_en = 0; 1392 ha->mr.fw_hbt_en = 0;
1360 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1361 vha->qla_stats.total_isp_aborts++;
1362
1363 ql_log(ql_log_info, vha, 0x013f,
1364 "Performing ISP error recovery - ha = %p.\n", ha);
1365 1393
1366 ha->isp_ops->reset_chip(vha); 1394 if (!critemp) {
1395 ha->flags.chip_reset_done = 0;
1396 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1397 vha->qla_stats.total_isp_aborts++;
1398 ql_log(ql_log_info, vha, 0x013f,
1399 "Performing ISP error recovery - ha = %p.\n", ha);
1400 ha->isp_ops->reset_chip(vha);
1401 }
1367 1402
1368 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1403 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1369 atomic_set(&vha->loop_state, LOOP_DOWN); 1404 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1383,12 +1418,19 @@ qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
1383 } 1418 }
1384 1419
1385 if (!ha->flags.eeh_busy) { 1420 if (!ha->flags.eeh_busy) {
1386 /* Requeue all commands in outstanding command list. */ 1421 if (critemp) {
1387 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 1422 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
1423 } else {
1424 /* Requeue all commands in outstanding command list. */
1425 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
1426 }
1388 } 1427 }
1389 1428
1390 qla2x00_free_irqs(vha); 1429 qla2x00_free_irqs(vha);
1391 set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags); 1430 if (critemp)
1431 set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
1432 else
1433 set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1392 1434
1393 /* Clear the Interrupts */ 1435 /* Clear the Interrupts */
1394 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS); 1436 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
@@ -1475,6 +1517,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
1475 uint32_t fw_heart_beat; 1517 uint32_t fw_heart_beat;
1476 uint32_t aenmbx0; 1518 uint32_t aenmbx0;
1477 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; 1519 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
1520 uint32_t tempc;
1478 1521
1479 /* Check firmware health */ 1522 /* Check firmware health */
1480 if (ha->mr.fw_hbt_cnt) 1523 if (ha->mr.fw_hbt_cnt)
@@ -1539,10 +1582,36 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
1539 } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) { 1582 } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
1540 ha->mr.fw_reset_timer_tick = 1583 ha->mr.fw_reset_timer_tick =
1541 QLAFX00_MAX_RESET_INTERVAL; 1584 QLAFX00_MAX_RESET_INTERVAL;
1585 } else if (aenmbx0 == MBA_FW_RESET_FCT) {
1586 ha->mr.fw_reset_timer_tick =
1587 QLAFX00_MAX_RESET_INTERVAL;
1542 } 1588 }
1543 ha->mr.old_aenmbx0_state = aenmbx0; 1589 ha->mr.old_aenmbx0_state = aenmbx0;
1544 ha->mr.fw_reset_timer_tick--; 1590 ha->mr.fw_reset_timer_tick--;
1545 } 1591 }
1592 if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
1593 /*
1594 * Critical temperature recovery to be
1595 * performed in timer routine
1596 */
1597 if (ha->mr.fw_critemp_timer_tick == 0) {
1598 tempc = QLAFX00_GET_TEMPERATURE(ha);
1599 ql_dbg(ql_dbg_timer, vha, 0x6012,
1600 "ISPFx00(%s): Critical temp timer, "
1601 "current SOC temperature: %d\n",
1602 __func__, tempc);
1603 if (tempc < ha->mr.critical_temperature) {
1604 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1605 clear_bit(FX00_CRITEMP_RECOVERY,
1606 &vha->dpc_flags);
1607 qla2xxx_wake_dpc(vha);
1608 }
1609 ha->mr.fw_critemp_timer_tick =
1610 QLAFX00_CRITEMP_INTERVAL;
1611 } else {
1612 ha->mr.fw_critemp_timer_tick--;
1613 }
1614 }
1546} 1615}
1547 1616
1548/* 1617/*
@@ -1570,7 +1639,7 @@ qlafx00_reset_initialize(scsi_qla_host_t *vha)
1570 1639
1571 if (vha->flags.online) { 1640 if (vha->flags.online) {
1572 scsi_block_requests(vha->host); 1641 scsi_block_requests(vha->host);
1573 qlafx00_abort_isp_cleanup(vha); 1642 qlafx00_abort_isp_cleanup(vha, false);
1574 } 1643 }
1575 1644
1576 ql_log(ql_log_info, vha, 0x0143, 1645 ql_log(ql_log_info, vha, 0x0143,
@@ -1602,7 +1671,15 @@ qlafx00_abort_isp(scsi_qla_host_t *vha)
1602 } 1671 }
1603 1672
1604 scsi_block_requests(vha->host); 1673 scsi_block_requests(vha->host);
1605 qlafx00_abort_isp_cleanup(vha); 1674 qlafx00_abort_isp_cleanup(vha, false);
1675 } else {
1676 scsi_block_requests(vha->host);
1677 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1678 vha->qla_stats.total_isp_aborts++;
1679 ha->isp_ops->reset_chip(vha);
1680 set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1681 /* Clear the Interrupts */
1682 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1606 } 1683 }
1607 1684
1608 ql_log(ql_log_info, vha, 0x0145, 1685 ql_log(ql_log_info, vha, 0x0145,
@@ -1688,6 +1765,15 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
1688 aen_code = FCH_EVT_LINKDOWN; 1765 aen_code = FCH_EVT_LINKDOWN;
1689 aen_data = 0; 1766 aen_data = 0;
1690 break; 1767 break;
1768 case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
1769 ql_log(ql_log_info, vha, 0x5082,
1770 "Process critical temperature event "
1771 "aenmb[0]: %x\n",
1772 evt->u.aenfx.evtcode);
1773 scsi_block_requests(vha->host);
1774 qlafx00_abort_isp_cleanup(vha, true);
1775 scsi_unblock_requests(vha->host);
1776 break;
1691 } 1777 }
1692 1778
1693 fc_host_post_event(vha->host, fc_get_event_number(), 1779 fc_host_post_event(vha->host, fc_get_event_number(),
@@ -1879,6 +1965,11 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
1879 sizeof(vha->hw->mr.uboot_version)); 1965 sizeof(vha->hw->mr.uboot_version));
1880 memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num, 1966 memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
1881 sizeof(vha->hw->mr.fru_serial_num)); 1967 sizeof(vha->hw->mr.fru_serial_num));
1968 vha->hw->mr.critical_temperature =
1969 (pinfo->nominal_temp_value) ?
1970 pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
1971 ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
1972 QLAFX00_EXTENDED_IO_EN_MASK) != 0;
1882 } else if (fx_type == FXDISC_GET_PORT_INFO) { 1973 } else if (fx_type == FXDISC_GET_PORT_INFO) {
1883 struct port_info_data *pinfo = 1974 struct port_info_data *pinfo =
1884 (struct port_info_data *) fdisc->u.fxiocb.rsp_addr; 1975 (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
@@ -2021,6 +2112,7 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
2021{ 2112{
2022 int rval; 2113 int rval;
2023 struct qla_hw_data *ha = vha->hw; 2114 struct qla_hw_data *ha = vha->hw;
2115 uint32_t tempc;
2024 2116
2025 /* Clear adapter flags. */ 2117 /* Clear adapter flags. */
2026 vha->flags.online = 0; 2118 vha->flags.online = 0;
@@ -2028,7 +2120,6 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
2028 vha->flags.reset_active = 0; 2120 vha->flags.reset_active = 0;
2029 ha->flags.pci_channel_io_perm_failure = 0; 2121 ha->flags.pci_channel_io_perm_failure = 0;
2030 ha->flags.eeh_busy = 0; 2122 ha->flags.eeh_busy = 0;
2031 ha->thermal_support = 0;
2032 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 2123 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2033 atomic_set(&vha->loop_state, LOOP_DOWN); 2124 atomic_set(&vha->loop_state, LOOP_DOWN);
2034 vha->device_flags = DFLG_NO_CABLE; 2125 vha->device_flags = DFLG_NO_CABLE;
@@ -2072,6 +2163,11 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
2072 rval = qla2x00_init_rings(vha); 2163 rval = qla2x00_init_rings(vha);
2073 ha->flags.chip_reset_done = 1; 2164 ha->flags.chip_reset_done = 1;
2074 2165
2166 tempc = QLAFX00_GET_TEMPERATURE(ha);
2167 ql_dbg(ql_dbg_init, vha, 0x0152,
2168 "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
2169 __func__, tempc);
2170
2075 return rval; 2171 return rval;
2076} 2172}
2077 2173
@@ -2526,16 +2622,13 @@ check_scsi_status:
2526 2622
2527 if (logit) 2623 if (logit)
2528 ql_dbg(ql_dbg_io, fcport->vha, 0x3058, 2624 ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
2529 "FCP command status: 0x%x-0x%x (0x%x) " 2625 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
2530 "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x" 2626 "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
2531 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x " 2627 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
2532 "rsp_info=0x%x resid=0x%x fw_resid=0x%x " 2628 "par_sense_len=0x%x, rsp_info_len=0x%x\n",
2533 "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
2534 comp_status, scsi_status, res, vha->host_no, 2629 comp_status, scsi_status, res, vha->host_no,
2535 cp->device->id, cp->device->lun, fcport->tgt_id, 2630 cp->device->id, cp->device->lun, fcport->tgt_id,
2536 lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], 2631 lscsi_status, cp->cmnd, scsi_bufflen(cp),
2537 cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
2538 cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
2539 rsp_info_len, resid_len, fw_resid_len, sense_len, 2632 rsp_info_len, resid_len, fw_resid_len, sense_len,
2540 par_sense_len, rsp_info_len); 2633 par_sense_len, rsp_info_len);
2541 2634
@@ -2720,9 +2813,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
2720 struct sts_entry_fx00 *pkt; 2813 struct sts_entry_fx00 *pkt;
2721 response_t *lptr; 2814 response_t *lptr;
2722 2815
2723 if (!vha->flags.online)
2724 return;
2725
2726 while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) != 2816 while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
2727 RESPONSE_PROCESSED) { 2817 RESPONSE_PROCESSED) {
2728 lptr = rsp->ring_ptr; 2818 lptr = rsp->ring_ptr;
@@ -2824,6 +2914,28 @@ qlafx00_async_event(scsi_qla_host_t *vha)
2824 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]); 2914 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
2825 data_size = 4; 2915 data_size = 4;
2826 break; 2916 break;
2917
2918 case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */
2919 ql_log(ql_log_info, vha, 0x5085,
2920 "Asynchronous over temperature event received "
2921 "aenmb[0]: %x\n",
2922 ha->aenmb[0]);
2923 break;
2924
2925 case QLAFX00_MBA_TEMP_NORM: /* Normal temperature event */
2926 ql_log(ql_log_info, vha, 0x5086,
2927 "Asynchronous normal temperature event received "
2928 "aenmb[0]: %x\n",
2929 ha->aenmb[0]);
2930 break;
2931
2932 case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
2933 ql_log(ql_log_info, vha, 0x5083,
2934 "Asynchronous critical temperature event received "
2935 "aenmb[0]: %x\n",
2936 ha->aenmb[0]);
2937 break;
2938
2827 default: 2939 default:
2828 ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1); 2940 ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
2829 ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2); 2941 ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 1a092af0e2c3..79a93c52baec 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -329,11 +329,13 @@ struct config_info_data {
329 uint64_t adapter_id; 329 uint64_t adapter_id;
330 330
331 uint32_t cluster_key_len; 331 uint32_t cluster_key_len;
332 uint8_t cluster_key[10]; 332 uint8_t cluster_key[16];
333 333
334 uint64_t cluster_master_id; 334 uint64_t cluster_master_id;
335 uint64_t cluster_slave_id; 335 uint64_t cluster_slave_id;
336 uint8_t cluster_flags; 336 uint8_t cluster_flags;
337 uint32_t enabled_capabilities;
338 uint32_t nominal_temp_value;
337} __packed; 339} __packed;
338 340
339#define FXDISC_GET_CONFIG_INFO 0x01 341#define FXDISC_GET_CONFIG_INFO 0x01
@@ -342,10 +344,11 @@ struct config_info_data {
342#define FXDISC_GET_TGT_NODE_LIST 0x81 344#define FXDISC_GET_TGT_NODE_LIST 0x81
343#define FXDISC_REG_HOST_INFO 0x99 345#define FXDISC_REG_HOST_INFO 0x99
344 346
345#define QLAFX00_HBA_ICNTRL_REG 0x21B08 347#define QLAFX00_HBA_ICNTRL_REG 0x20B08
346#define QLAFX00_ICR_ENB_MASK 0x80000000 348#define QLAFX00_ICR_ENB_MASK 0x80000000
347#define QLAFX00_ICR_DIS_MASK 0x7fffffff 349#define QLAFX00_ICR_DIS_MASK 0x7fffffff
348#define QLAFX00_HST_RST_REG 0x18264 350#define QLAFX00_HST_RST_REG 0x18264
351#define QLAFX00_SOC_TEMP_REG 0x184C4
349#define QLAFX00_HST_TO_HBA_REG 0x20A04 352#define QLAFX00_HST_TO_HBA_REG 0x20A04
350#define QLAFX00_HBA_TO_HOST_REG 0x21B70 353#define QLAFX00_HBA_TO_HOST_REG 0x21B70
351#define QLAFX00_HST_INT_STS_BITS 0x7 354#define QLAFX00_HST_INT_STS_BITS 0x7
@@ -361,6 +364,9 @@ struct config_info_data {
361#define QLAFX00_INTR_ALL_CMPLT 0x7 364#define QLAFX00_INTR_ALL_CMPLT 0x7
362 365
363#define QLAFX00_MBA_SYSTEM_ERR 0x8002 366#define QLAFX00_MBA_SYSTEM_ERR 0x8002
367#define QLAFX00_MBA_TEMP_OVER 0x8005
368#define QLAFX00_MBA_TEMP_NORM 0x8006
369#define QLAFX00_MBA_TEMP_CRIT 0x8007
364#define QLAFX00_MBA_LINK_UP 0x8011 370#define QLAFX00_MBA_LINK_UP 0x8011
365#define QLAFX00_MBA_LINK_DOWN 0x8012 371#define QLAFX00_MBA_LINK_DOWN 0x8012
366#define QLAFX00_MBA_PORT_UPDATE 0x8014 372#define QLAFX00_MBA_PORT_UPDATE 0x8014
@@ -434,9 +440,11 @@ struct qla_mt_iocb_rqst_fx00 {
434 440
435 __le32 dataword_extra; 441 __le32 dataword_extra;
436 442
437 __le32 req_len; 443 __le16 req_len;
444 __le16 reserved_2;
438 445
439 __le32 rsp_len; 446 __le16 rsp_len;
447 __le16 reserved_3;
440}; 448};
441 449
442struct qla_mt_iocb_rsp_fx00 { 450struct qla_mt_iocb_rsp_fx00 {
@@ -499,12 +507,37 @@ struct mr_data_fx00 {
499 uint32_t old_fw_hbt_cnt; 507 uint32_t old_fw_hbt_cnt;
500 uint16_t fw_reset_timer_tick; 508 uint16_t fw_reset_timer_tick;
501 uint8_t fw_reset_timer_exp; 509 uint8_t fw_reset_timer_exp;
510 uint16_t fw_critemp_timer_tick;
502 uint32_t old_aenmbx0_state; 511 uint32_t old_aenmbx0_state;
512 uint32_t critical_temperature;
513 bool extended_io_enabled;
503}; 514};
504 515
516#define QLAFX00_EXTENDED_IO_EN_MASK 0x20
517
518/*
519 * SoC Junction Temperature is stored in
520 * bits 9:1 of SoC Junction Temperature Register
521 * in a firmware-specific format.
522 * To get the temperature in Celsius degrees
523 * the value from this bit field should be converted
524 * using this formula:
525 * Temperature (degrees C) = ((3,153,000 - (10,000 * X)) / 13,825)
526 * where X is the bit field value.
527 * This macro reads the register, extracts the bit field value,
528 * performs the calculations and returns temperature in Celsius.
529 */
530#define QLAFX00_GET_TEMPERATURE(ha) ((3153000 - (10000 * \
531 ((QLAFX00_RD_REG(ha, QLAFX00_SOC_TEMP_REG) & 0x3FE) >> 1))) / 13825)
532
533
505#define QLAFX00_LOOP_DOWN_TIME 615 /* 600 */ 534#define QLAFX00_LOOP_DOWN_TIME 615 /* 600 */
506#define QLAFX00_HEARTBEAT_INTERVAL 6 /* number of seconds */ 535#define QLAFX00_HEARTBEAT_INTERVAL 6 /* number of seconds */
507#define QLAFX00_HEARTBEAT_MISS_CNT 3 /* number of miss */ 536#define QLAFX00_HEARTBEAT_MISS_CNT 3 /* number of miss */
508#define QLAFX00_RESET_INTERVAL 120 /* number of seconds */ 537#define QLAFX00_RESET_INTERVAL 120 /* number of seconds */
509#define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */ 538#define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */
539#define QLAFX00_CRITEMP_INTERVAL 60 /* number of seconds */
540
541#define QLAFX00_CRITEMP_THRSHLD 80 /* Celsius degrees */
542
510#endif 543#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index cce0cd0d7ec4..11ce53dcbe7e 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -848,7 +848,6 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
848{ 848{
849 int done = 0, timeout = 0; 849 int done = 0, timeout = 0;
850 uint32_t lock_owner = 0; 850 uint32_t lock_owner = 0;
851 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
852 851
853 while (!done) { 852 while (!done) {
854 /* acquire semaphore2 from PCI HW block */ 853 /* acquire semaphore2 from PCI HW block */
@@ -857,9 +856,6 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
857 break; 856 break;
858 if (timeout >= qla82xx_rom_lock_timeout) { 857 if (timeout >= qla82xx_rom_lock_timeout) {
859 lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); 858 lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
860 ql_dbg(ql_dbg_p3p, vha, 0xb085,
861 "Failed to acquire rom lock, acquired by %d.\n",
862 lock_owner);
863 return -1; 859 return -1;
864 } 860 }
865 timeout++; 861 timeout++;
@@ -1666,8 +1662,14 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1666 } 1662 }
1667 1663
1668 /* Mapping of IO base pointer */ 1664 /* Mapping of IO base pointer */
1669 ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase + 1665 if (IS_QLA8044(ha)) {
1670 0xbc000 + (ha->pdev->devfn << 11)); 1666 ha->iobase =
1667 (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase);
1668 } else if (IS_QLA82XX(ha)) {
1669 ha->iobase =
1670 (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
1671 0xbc000 + (ha->pdev->devfn << 11));
1672 }
1671 1673
1672 if (!ql2xdbwr) { 1674 if (!ql2xdbwr) {
1673 ha->nxdb_wr_ptr = 1675 ha->nxdb_wr_ptr =
@@ -1967,7 +1969,7 @@ static struct qla82xx_legacy_intr_set legacy_intr[] = \
1967 * @ha: SCSI driver HA context 1969 * @ha: SCSI driver HA context
1968 * @mb0: Mailbox0 register 1970 * @mb0: Mailbox0 register
1969 */ 1971 */
1970static void 1972void
1971qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 1973qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1972{ 1974{
1973 uint16_t cnt; 1975 uint16_t cnt;
@@ -2075,13 +2077,6 @@ qla82xx_intr_handler(int irq, void *dev_id)
2075 WRT_REG_DWORD(&reg->host_int, 0); 2077 WRT_REG_DWORD(&reg->host_int, 0);
2076 } 2078 }
2077 2079
2078#ifdef QL_DEBUG_LEVEL_17
2079 if (!irq && ha->flags.eeh_busy)
2080 ql_log(ql_log_warn, vha, 0x503d,
2081 "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2082 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2083#endif
2084
2085 qla2x00_handle_mbx_completion(ha, status); 2080 qla2x00_handle_mbx_completion(ha, status);
2086 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2081 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2087 2082
@@ -2147,13 +2142,6 @@ qla82xx_msix_default(int irq, void *dev_id)
2147 WRT_REG_DWORD(&reg->host_int, 0); 2142 WRT_REG_DWORD(&reg->host_int, 0);
2148 } while (0); 2143 } while (0);
2149 2144
2150#ifdef QL_DEBUG_LEVEL_17
2151 if (!irq && ha->flags.eeh_busy)
2152 ql_log(ql_log_warn, vha, 0x5044,
2153 "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2154 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2155#endif
2156
2157 qla2x00_handle_mbx_completion(ha, status); 2145 qla2x00_handle_mbx_completion(ha, status);
2158 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2146 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2159 2147
@@ -2247,7 +2235,10 @@ qla82xx_enable_intrs(struct qla_hw_data *ha)
2247 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2235 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2248 qla82xx_mbx_intr_enable(vha); 2236 qla82xx_mbx_intr_enable(vha);
2249 spin_lock_irq(&ha->hardware_lock); 2237 spin_lock_irq(&ha->hardware_lock);
2250 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); 2238 if (IS_QLA8044(ha))
2239 qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0);
2240 else
2241 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2251 spin_unlock_irq(&ha->hardware_lock); 2242 spin_unlock_irq(&ha->hardware_lock);
2252 ha->interrupts_on = 1; 2243 ha->interrupts_on = 1;
2253} 2244}
@@ -2258,7 +2249,10 @@ qla82xx_disable_intrs(struct qla_hw_data *ha)
2258 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2249 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2259 qla82xx_mbx_intr_disable(vha); 2250 qla82xx_mbx_intr_disable(vha);
2260 spin_lock_irq(&ha->hardware_lock); 2251 spin_lock_irq(&ha->hardware_lock);
2261 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); 2252 if (IS_QLA8044(ha))
2253 qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
2254 else
2255 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2262 spin_unlock_irq(&ha->hardware_lock); 2256 spin_unlock_irq(&ha->hardware_lock);
2263 ha->interrupts_on = 0; 2257 ha->interrupts_on = 0;
2264} 2258}
@@ -3008,6 +3002,9 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
3008 if (IS_QLA82XX(ha)) { 3002 if (IS_QLA82XX(ha)) {
3009 qla82xx_clear_drv_active(ha); 3003 qla82xx_clear_drv_active(ha);
3010 qla82xx_idc_unlock(ha); 3004 qla82xx_idc_unlock(ha);
3005 } else if (IS_QLA8044(ha)) {
3006 qla8044_clear_drv_active(vha);
3007 qla8044_idc_unlock(ha);
3011 } 3008 }
3012 3009
3013 /* Set DEV_FAILED flag to disable timer */ 3010 /* Set DEV_FAILED flag to disable timer */
@@ -3134,7 +3131,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
3134 if (fw_major_version != ha->fw_major_version || 3131 if (fw_major_version != ha->fw_major_version ||
3135 fw_minor_version != ha->fw_minor_version || 3132 fw_minor_version != ha->fw_minor_version ||
3136 fw_subminor_version != ha->fw_subminor_version) { 3133 fw_subminor_version != ha->fw_subminor_version) {
3137 ql_log(ql_log_info, vha, 0xb02d, 3134 ql_dbg(ql_dbg_p3p, vha, 0xb02d,
3138 "Firmware version differs " 3135 "Firmware version differs "
3139 "Previous version: %d:%d:%d - " 3136 "Previous version: %d:%d:%d - "
3140 "New version: %d:%d:%d\n", 3137 "New version: %d:%d:%d\n",
@@ -3330,6 +3327,14 @@ static int qla82xx_check_temp(scsi_qla_host_t *vha)
3330 return 0; 3327 return 0;
3331} 3328}
3332 3329
3330int qla82xx_read_temperature(scsi_qla_host_t *vha)
3331{
3332 uint32_t temp;
3333
3334 temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE);
3335 return qla82xx_get_temp_val(temp);
3336}
3337
3333void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) 3338void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
3334{ 3339{
3335 struct qla_hw_data *ha = vha->hw; 3340 struct qla_hw_data *ha = vha->hw;
@@ -3423,8 +3428,18 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3423 3428
3424int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3429int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3425{ 3430{
3426 int rval; 3431 int rval = -1;
3427 rval = qla82xx_device_state_handler(vha); 3432 struct qla_hw_data *ha = vha->hw;
3433
3434 if (IS_QLA82XX(ha))
3435 rval = qla82xx_device_state_handler(vha);
3436 else if (IS_QLA8044(ha)) {
3437 qla8044_idc_lock(ha);
3438 /* Decide the reset ownership */
3439 qla83xx_reset_ownership(vha);
3440 qla8044_idc_unlock(ha);
3441 rval = qla8044_device_state_handler(vha);
3442 }
3428 return rval; 3443 return rval;
3429} 3444}
3430 3445
@@ -3432,17 +3447,25 @@ void
3432qla82xx_set_reset_owner(scsi_qla_host_t *vha) 3447qla82xx_set_reset_owner(scsi_qla_host_t *vha)
3433{ 3448{
3434 struct qla_hw_data *ha = vha->hw; 3449 struct qla_hw_data *ha = vha->hw;
3435 uint32_t dev_state; 3450 uint32_t dev_state = 0;
3451
3452 if (IS_QLA82XX(ha))
3453 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3454 else if (IS_QLA8044(ha))
3455 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
3436 3456
3437 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3438 if (dev_state == QLA8XXX_DEV_READY) { 3457 if (dev_state == QLA8XXX_DEV_READY) {
3439 ql_log(ql_log_info, vha, 0xb02f, 3458 ql_log(ql_log_info, vha, 0xb02f,
3440 "HW State: NEED RESET\n"); 3459 "HW State: NEED RESET\n");
3441 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3460 if (IS_QLA82XX(ha)) {
3442 QLA8XXX_DEV_NEED_RESET); 3461 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3443 ha->flags.nic_core_reset_owner = 1; 3462 QLA8XXX_DEV_NEED_RESET);
3444 ql_dbg(ql_dbg_p3p, vha, 0xb030, 3463 ha->flags.nic_core_reset_owner = 1;
3445 "reset_owner is 0x%x\n", ha->portnum); 3464 ql_dbg(ql_dbg_p3p, vha, 0xb030,
3465 "reset_owner is 0x%x\n", ha->portnum);
3466 } else if (IS_QLA8044(ha))
3467 qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
3468 QLA8XXX_DEV_NEED_RESET);
3446 } else 3469 } else
3447 ql_log(ql_log_info, vha, 0xb031, 3470 ql_log(ql_log_info, vha, 0xb031,
3448 "Device state is 0x%x = %s.\n", 3471 "Device state is 0x%x = %s.\n",
@@ -3463,7 +3486,7 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha)
3463int 3486int
3464qla82xx_abort_isp(scsi_qla_host_t *vha) 3487qla82xx_abort_isp(scsi_qla_host_t *vha)
3465{ 3488{
3466 int rval; 3489 int rval = -1;
3467 struct qla_hw_data *ha = vha->hw; 3490 struct qla_hw_data *ha = vha->hw;
3468 3491
3469 if (vha->device_flags & DFLG_DEV_FAILED) { 3492 if (vha->device_flags & DFLG_DEV_FAILED) {
@@ -3477,7 +3500,15 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3477 qla82xx_set_reset_owner(vha); 3500 qla82xx_set_reset_owner(vha);
3478 qla82xx_idc_unlock(ha); 3501 qla82xx_idc_unlock(ha);
3479 3502
3480 rval = qla82xx_device_state_handler(vha); 3503 if (IS_QLA82XX(ha))
3504 rval = qla82xx_device_state_handler(vha);
3505 else if (IS_QLA8044(ha)) {
3506 qla8044_idc_lock(ha);
3507 /* Decide the reset ownership */
3508 qla83xx_reset_ownership(vha);
3509 qla8044_idc_unlock(ha);
3510 rval = qla8044_device_state_handler(vha);
3511 }
3481 3512
3482 qla82xx_idc_lock(ha); 3513 qla82xx_idc_lock(ha);
3483 qla82xx_clear_rst_ready(ha); 3514 qla82xx_clear_rst_ready(ha);
@@ -3597,7 +3628,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3597void 3628void
3598qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) 3629qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3599{ 3630{
3600 int i; 3631 int i, fw_state = 0;
3601 unsigned long flags; 3632 unsigned long flags;
3602 struct qla_hw_data *ha = vha->hw; 3633 struct qla_hw_data *ha = vha->hw;
3603 3634
@@ -3608,7 +3639,11 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3608 if (!ha->flags.isp82xx_fw_hung) { 3639 if (!ha->flags.isp82xx_fw_hung) {
3609 for (i = 0; i < 2; i++) { 3640 for (i = 0; i < 2; i++) {
3610 msleep(1000); 3641 msleep(1000);
3611 if (qla82xx_check_fw_alive(vha)) { 3642 if (IS_QLA82XX(ha))
3643 fw_state = qla82xx_check_fw_alive(vha);
3644 else if (IS_QLA8044(ha))
3645 fw_state = qla8044_check_fw_alive(vha);
3646 if (fw_state) {
3612 ha->flags.isp82xx_fw_hung = 1; 3647 ha->flags.isp82xx_fw_hung = 1;
3613 qla82xx_clear_pending_mbx(vha); 3648 qla82xx_clear_pending_mbx(vha);
3614 break; 3649 break;
@@ -4072,7 +4107,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
4072 return QLA_SUCCESS; 4107 return QLA_SUCCESS;
4073} 4108}
4074 4109
4075static int 4110int
4076qla82xx_validate_template_chksum(scsi_qla_host_t *vha) 4111qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
4077{ 4112{
4078 struct qla_hw_data *ha = vha->hw; 4113 struct qla_hw_data *ha = vha->hw;
@@ -4384,7 +4419,11 @@ qla82xx_md_prep(scsi_qla_host_t *vha)
4384 ha->md_template_size / 1024); 4419 ha->md_template_size / 1024);
4385 4420
4386 /* Get Minidump template */ 4421 /* Get Minidump template */
4387 rval = qla82xx_md_get_template(vha); 4422 if (IS_QLA8044(ha))
4423 rval = qla8044_md_get_template(vha);
4424 else
4425 rval = qla82xx_md_get_template(vha);
4426
4388 if (rval == QLA_SUCCESS) { 4427 if (rval == QLA_SUCCESS) {
4389 ql_dbg(ql_dbg_p3p, vha, 0xb04b, 4428 ql_dbg(ql_dbg_p3p, vha, 0xb04b,
4390 "MiniDump Template obtained\n"); 4429 "MiniDump Template obtained\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index d268e8406fdb..1bb93dbbccbb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -589,6 +589,7 @@
589 * The PCI VendorID and DeviceID for our board. 589 * The PCI VendorID and DeviceID for our board.
590 */ 590 */
591#define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021 591#define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021
592#define PCI_DEVICE_ID_QLOGIC_ISP8044 0x8044
592 593
593#define QLA82XX_MSIX_TBL_SPACE 8192 594#define QLA82XX_MSIX_TBL_SPACE 8192
594#define QLA82XX_PCI_REG_MSIX_TBL 0x44 595#define QLA82XX_PCI_REG_MSIX_TBL 0x44
@@ -954,6 +955,11 @@ struct ct6_dsd {
954#define QLA82XX_CNTRL 98 955#define QLA82XX_CNTRL 98
955#define QLA82XX_TLHDR 99 956#define QLA82XX_TLHDR 99
956#define QLA82XX_RDEND 255 957#define QLA82XX_RDEND 255
958#define QLA8044_POLLRD 35
959#define QLA8044_RDMUX2 36
960#define QLA8044_L1DTG 8
961#define QLA8044_L1ITG 9
962#define QLA8044_POLLRDMWR 37
957 963
958/* 964/*
959 * Opcodes for Control Entries. 965 * Opcodes for Control Entries.
@@ -1191,4 +1197,8 @@ enum {
1191 QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */ 1197 QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
1192 QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */ 1198 QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
1193}; 1199};
1200
1201#define LEG_INTR_PTR_OFFSET 0x38C0
1202#define LEG_INTR_TRIG_OFFSET 0x38C4
1203#define LEG_INTR_MASK_OFFSET 0x38C8
1194#endif 1204#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
new file mode 100644
index 000000000000..8164cc9e7286
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -0,0 +1,3716 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7
8#include <linux/vmalloc.h>
9
10#include "qla_def.h"
11#include "qla_gbl.h"
12
13#include <linux/delay.h>
14
15/* 8044 Flash Read/Write functions */
16uint32_t
17qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
18{
19 return readl((void __iomem *) (ha->nx_pcibase + addr));
20}
21
22void
23qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
24{
25 writel(val, (void __iomem *)((ha)->nx_pcibase + addr));
26}
27
28int
29qla8044_rd_direct(struct scsi_qla_host *vha,
30 const uint32_t crb_reg)
31{
32 struct qla_hw_data *ha = vha->hw;
33
34 if (crb_reg < CRB_REG_INDEX_MAX)
35 return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
36 else
37 return QLA_FUNCTION_FAILED;
38}
39
40void
41qla8044_wr_direct(struct scsi_qla_host *vha,
42 const uint32_t crb_reg,
43 const uint32_t value)
44{
45 struct qla_hw_data *ha = vha->hw;
46
47 if (crb_reg < CRB_REG_INDEX_MAX)
48 qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
49}
50
51static int
52qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
53{
54 uint32_t val;
55 int ret_val = QLA_SUCCESS;
56 struct qla_hw_data *ha = vha->hw;
57
58 qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
59 val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));
60
61 if (val != addr) {
62 ql_log(ql_log_warn, vha, 0xb087,
63 "%s: Failed to set register window : "
64 "addr written 0x%x, read 0x%x!\n",
65 __func__, addr, val);
66 ret_val = QLA_FUNCTION_FAILED;
67 }
68 return ret_val;
69}
70
71static int
72qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
73{
74 int ret_val = QLA_SUCCESS;
75 struct qla_hw_data *ha = vha->hw;
76
77 ret_val = qla8044_set_win_base(vha, addr);
78 if (!ret_val)
79 *data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
80 else
81 ql_log(ql_log_warn, vha, 0xb088,
82 "%s: failed read of addr 0x%x!\n", __func__, addr);
83 return ret_val;
84}
85
86static int
87qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
88{
89 int ret_val = QLA_SUCCESS;
90 struct qla_hw_data *ha = vha->hw;
91
92 ret_val = qla8044_set_win_base(vha, addr);
93 if (!ret_val)
94 qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
95 else
96 ql_log(ql_log_warn, vha, 0xb089,
97 "%s: failed wrt to addr 0x%x, data 0x%x\n",
98 __func__, addr, data);
99 return ret_val;
100}
101
102/*
103 * qla8044_read_write_crb_reg - Read from raddr and write value to waddr.
104 *
105 * @ha : Pointer to adapter structure
106 * @raddr : CRB address to read from
107 * @waddr : CRB address to write to
108 *
109 */
110static void
111qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
112 uint32_t raddr, uint32_t waddr)
113{
114 uint32_t value;
115
116 qla8044_rd_reg_indirect(vha, raddr, &value);
117 qla8044_wr_reg_indirect(vha, waddr, value);
118}
119
120/*
121 * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask,
122 * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
123 *
124 * @vha : Pointer to adapter structure
125 * @raddr : CRB address to read from
126 * @waddr : CRB address to write to
127 * @p_rmw_hdr : header with shift/or/xor values.
128 *
129 */
130static void
131qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
132 uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr)
133{
134 uint32_t value;
135
136 if (p_rmw_hdr->index_a)
137 value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
138 else
139 qla8044_rd_reg_indirect(vha, raddr, &value);
140 value &= p_rmw_hdr->test_mask;
141 value <<= p_rmw_hdr->shl;
142 value >>= p_rmw_hdr->shr;
143 value |= p_rmw_hdr->or_value;
144 value ^= p_rmw_hdr->xor_value;
145 qla8044_wr_reg_indirect(vha, waddr, value);
146 return;
147}
148
149inline void
150qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
151{
152 uint32_t qsnt_state;
153 struct qla_hw_data *ha = vha->hw;
154
155 qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
156 qsnt_state |= (1 << ha->portnum);
157 qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
158 ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
159 __func__, vha->host_no, qsnt_state);
160}
161
162void
163qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
164{
165 uint32_t qsnt_state;
166 struct qla_hw_data *ha = vha->hw;
167
168 qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
169 qsnt_state &= ~(1 << ha->portnum);
170 qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
171 ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
172 __func__, vha->host_no, qsnt_state);
173}
174
175/**
176 *
177 * qla8044_lock_recovery - Recovers the idc_lock.
178 * @ha : Pointer to adapter structure
179 *
180 * Lock Recovery Register
181 * 5-2 Lock recovery owner: Function ID of driver doing lock recovery,
182 * valid if bits 1..0 are set by driver doing lock recovery.
183 * 1-0 1 - Driver intends to force unlock the IDC lock.
184 * 2 - Driver is moving forward to unlock the IDC lock. Driver clears
185 * this field after force unlocking the IDC lock.
186 *
187 * Lock Recovery process
188 * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is
189 * greater than 0, then wait for the other driver to unlock otherwise
190 * move to the next step.
191 * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY
192 * register bits 1..0 and also set the function# in bits 5..2.
193 * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms.
194 * Wait for the other driver to perform lock recovery if the function
195 * number in bits 5..2 has changed, otherwise move to the next step.
196 * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0
197 * leaving your function# in bits 5..2.
198 * e. Force unlock using the DRIVER_UNLOCK register and immediately clear
199 * the IDC_LOCK_RECOVERY bits 5..0 by writing 0.
200 **/
201static int
202qla8044_lock_recovery(struct scsi_qla_host *vha)
203{
204 uint32_t lock = 0, lockid;
205 struct qla_hw_data *ha = vha->hw;
206
207 lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
208
209 /* Check for other Recovery in progress, go wait */
210 if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
211 return QLA_FUNCTION_FAILED;
212
213 /* Intent to Recover */
214 qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
215 (ha->portnum <<
216 IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER);
217 msleep(200);
218
219 /* Check Intent to Recover is advertised */
220 lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
221 if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum <<
222 IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
223 return QLA_FUNCTION_FAILED;
224
225 ql_dbg(ql_dbg_p3p, vha, 0xb08B, "%s:%d: IDC Lock recovery initiated\n"
226 , __func__, ha->portnum);
227
228 /* Proceed to Recover */
229 qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
230 (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
231 PROCEED_TO_RECOVER);
232
233 /* Force Unlock() */
234 qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
235 qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
236
237 /* Clear bits 0-5 in IDC_RECOVERY register*/
238 qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);
239
240 /* Get lock() */
241 lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
242 if (lock) {
243 lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
244 lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
245 qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
246 return QLA_SUCCESS;
247 } else
248 return QLA_FUNCTION_FAILED;
249}
250
251int
252qla8044_idc_lock(struct qla_hw_data *ha)
253{
254 uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
255 uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
256 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
257
258 while (status == 0) {
259 /* acquire semaphore5 from PCI HW block */
260 status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
261
262 if (status) {
263 /* Increment Counter (8-31) and update func_num (0-7) on
264 * getting a successful lock */
265 lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
266 lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
267 qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
268 break;
269 }
270
271 if (timeout == 0)
272 first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
273
274 if (++timeout >=
275 (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
276 tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
277 func_num = tmo_owner & 0xFF;
278 lock_cnt = tmo_owner >> 8;
279 ql_log(ql_log_warn, vha, 0xb114,
280 "%s: Lock by func %d failed after 2s, lock held "
281 "by func %d, lock count %d, first_owner %d\n",
282 __func__, ha->portnum, func_num, lock_cnt,
283 (first_owner & 0xFF));
284 if (first_owner != tmo_owner) {
285 /* Some other driver got lock,
286 * OR same driver got lock again (counter
287 * value changed), when we were waiting for
288 * lock. Retry for another 2 sec */
289 ql_dbg(ql_dbg_p3p, vha, 0xb115,
290 "%s: %d: IDC lock failed\n",
291 __func__, ha->portnum);
292 timeout = 0;
293 } else {
294 /* Same driver holding lock > 2sec.
295 * Force Recovery */
296 if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
297 /* Recovered and got lock */
298 ret_val = QLA_SUCCESS;
299 ql_dbg(ql_dbg_p3p, vha, 0xb116,
300 "%s:IDC lock Recovery by %d"
301 "successful...\n", __func__,
302 ha->portnum);
303 }
304 /* Recovery Failed, some other function
305 * has the lock, wait for 2secs
306 * and retry
307 */
308 ql_dbg(ql_dbg_p3p, vha, 0xb08a,
309 "%s: IDC lock Recovery by %d "
310 "failed, Retrying timout\n", __func__,
311 ha->portnum);
312 timeout = 0;
313 }
314 }
315 msleep(QLA8044_DRV_LOCK_MSLEEP);
316 }
317 return ret_val;
318}
319
320void
321qla8044_idc_unlock(struct qla_hw_data *ha)
322{
323 int id;
324 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
325
326 id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
327
328 if ((id & 0xFF) != ha->portnum) {
329 ql_log(ql_log_warn, vha, 0xb118,
330 "%s: IDC Unlock by %d failed, lock owner is %d!\n",
331 __func__, ha->portnum, (id & 0xFF));
332 return;
333 }
334
335 /* Keep lock counter value, update the ha->func_num to 0xFF */
336 qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
337 qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
338}
339
340/* 8044 Flash Lock/Unlock functions */
341static int
342qla8044_flash_lock(scsi_qla_host_t *vha)
343{
344 int lock_owner;
345 int timeout = 0;
346 uint32_t lock_status = 0;
347 int ret_val = QLA_SUCCESS;
348 struct qla_hw_data *ha = vha->hw;
349
350 while (lock_status == 0) {
351 lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
352 if (lock_status)
353 break;
354
355 if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
356 lock_owner = qla8044_rd_reg(ha,
357 QLA8044_FLASH_LOCK_ID);
358 ql_log(ql_log_warn, vha, 0xb113,
359 "%s: flash lock by %d failed, held by %d\n",
360 __func__, ha->portnum, lock_owner);
361 ret_val = QLA_FUNCTION_FAILED;
362 break;
363 }
364 msleep(20);
365 }
366 qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
367 return ret_val;
368}
369
370static void
371qla8044_flash_unlock(scsi_qla_host_t *vha)
372{
373 int ret_val;
374 struct qla_hw_data *ha = vha->hw;
375
376 /* Reading FLASH_UNLOCK register unlocks the Flash */
377 qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
378 ret_val = qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
379}
380
381
382static
383void qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
384{
385
386 if (qla8044_flash_lock(vha)) {
387 /* Someone else is holding the lock. */
388 ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
389 }
390
391 /*
392 * Either we got the lock, or someone
393 * else died while holding it.
394 * In either case, unlock.
395 */
396 qla8044_flash_unlock(vha);
397}
398
399/*
400 * Address and length are byte address
401 */
402static int
403qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data,
404 uint32_t flash_addr, int u32_word_count)
405{
406 int i, ret_val = QLA_SUCCESS;
407 uint32_t u32_word;
408
409 if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
410 ret_val = QLA_FUNCTION_FAILED;
411 goto exit_lock_error;
412 }
413
414 if (flash_addr & 0x03) {
415 ql_log(ql_log_warn, vha, 0xb117,
416 "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
417 ret_val = QLA_FUNCTION_FAILED;
418 goto exit_flash_read;
419 }
420
421 for (i = 0; i < u32_word_count; i++) {
422 if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
423 (flash_addr & 0xFFFF0000))) {
424 ql_log(ql_log_warn, vha, 0xb119,
425 "%s: failed to write addr 0x%x to "
426 "FLASH_DIRECT_WINDOW\n! ",
427 __func__, flash_addr);
428 ret_val = QLA_FUNCTION_FAILED;
429 goto exit_flash_read;
430 }
431
432 ret_val = qla8044_rd_reg_indirect(vha,
433 QLA8044_FLASH_DIRECT_DATA(flash_addr),
434 &u32_word);
435 if (ret_val != QLA_SUCCESS) {
436 ql_log(ql_log_warn, vha, 0xb08c,
437 "%s: failed to read addr 0x%x!\n",
438 __func__, flash_addr);
439 goto exit_flash_read;
440 }
441
442 *(uint32_t *)p_data = u32_word;
443 p_data = p_data + 4;
444 flash_addr = flash_addr + 4;
445 }
446
447exit_flash_read:
448 qla8044_flash_unlock(vha);
449
450exit_lock_error:
451 return ret_val;
452}
453
454/*
455 * Address and length are byte address
456 */
457uint8_t *
458qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
459 uint32_t offset, uint32_t length)
460{
461 scsi_block_requests(vha->host);
462 if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
463 != QLA_SUCCESS) {
464 ql_log(ql_log_warn, vha, 0xb08d,
465 "%s: Failed to read from flash\n",
466 __func__);
467 }
468 scsi_unblock_requests(vha->host);
469 return buf;
470}
471
472inline int
473qla8044_need_reset(struct scsi_qla_host *vha)
474{
475 uint32_t drv_state, drv_active;
476 int rval;
477 struct qla_hw_data *ha = vha->hw;
478
479 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
480 drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
481
482 rval = drv_state & (1 << ha->portnum);
483
484 if (ha->flags.eeh_busy && drv_active)
485 rval = 1;
486 return rval;
487}
488
489/*
490 * qla8044_write_list - Write the value (p_entry->arg2) to address specified
491 * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between
492 * entries.
493 *
494 * @vha : Pointer to adapter structure
495 * @p_hdr : reset_entry header for WRITE_LIST opcode.
496 *
497 */
498static void
499qla8044_write_list(struct scsi_qla_host *vha,
500 struct qla8044_reset_entry_hdr *p_hdr)
501{
502 struct qla8044_entry *p_entry;
503 uint32_t i;
504
505 p_entry = (struct qla8044_entry *)((char *)p_hdr +
506 sizeof(struct qla8044_reset_entry_hdr));
507
508 for (i = 0; i < p_hdr->count; i++, p_entry++) {
509 qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
510 if (p_hdr->delay)
511 udelay((uint32_t)(p_hdr->delay));
512 }
513}
514
515/*
516 * qla8044_read_write_list - Read from address specified by p_entry->arg1,
517 * write value read to address specified by p_entry->arg2, for all entries in
518 * header with delay of p_hdr->delay between entries.
519 *
520 * @vha : Pointer to adapter structure
521 * @p_hdr : reset_entry header for READ_WRITE_LIST opcode.
522 *
523 */
524static void
525qla8044_read_write_list(struct scsi_qla_host *vha,
526 struct qla8044_reset_entry_hdr *p_hdr)
527{
528 struct qla8044_entry *p_entry;
529 uint32_t i;
530
531 p_entry = (struct qla8044_entry *)((char *)p_hdr +
532 sizeof(struct qla8044_reset_entry_hdr));
533
534 for (i = 0; i < p_hdr->count; i++, p_entry++) {
535 qla8044_read_write_crb_reg(vha, p_entry->arg1,
536 p_entry->arg2);
537 if (p_hdr->delay)
538 udelay((uint32_t)(p_hdr->delay));
539 }
540}
541
/*
 * qla8044_poll_reg - Poll the given CRB addr for duration msecs till
 * value read ANDed with test_mask is equal to test_result.
 *
 * @ha : Pointer to adapter structure
 * @addr : CRB register address
 * @duration : Poll for total of "duration" msecs
 * @test_mask : Mask value read with "test_mask"
 * @test_result : Compare (value&test_mask) with test_result.
 *
 * Return Value - 0 on success, 1 on timeout or register-read failure
 * (the 0/1 timeout_error flag is returned directly; callers only test
 * for non-zero).
 */
static int
qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
    int duration, uint32_t test_mask, uint32_t test_result)
{
    uint32_t value;
    int timeout_error;
    uint8_t retries;
    int ret_val = QLA_SUCCESS;

    /* Initial read; an indirect-read failure counts as a timeout. */
    ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
    if (ret_val == QLA_FUNCTION_FAILED) {
        timeout_error = 1;
        goto exit_poll_reg;
    }

    /* poll every 1/10 of the total duration */
    /* NOTE(review): retries is uint8_t, so duration > 2550 would
     * truncate the retry count — template delays appear to be small;
     * confirm against the reset-template spec. */
    retries = duration/10;

    do {
        if ((value & test_mask) != test_result) {
            /* Not matched yet: sleep one slice and re-read. */
            timeout_error = 1;
            msleep(duration/10);
            ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
            if (ret_val == QLA_FUNCTION_FAILED) {
                timeout_error = 1;
                goto exit_poll_reg;
            }
        } else {
            /* Masked value matched: success. */
            timeout_error = 0;
            break;
        }
    } while (retries--);

exit_poll_reg:
    if (timeout_error) {
        /* Record the failure so template processing can report it. */
        vha->reset_tmplt.seq_error++;
        ql_log(ql_log_fatal, vha, 0xb090,
            "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
            __func__, value, test_mask, test_result);
    }

    return timeout_error;
}
597
598/*
599 * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB
600 * register specified by p_entry->arg1 and compare (value AND test_mask) with
601 * test_result to validate it. Wait for p_hdr->delay between processing entries.
602 *
603 * @ha : Pointer to adapter structure
604 * @p_hdr : reset_entry header for POLL_LIST opcode.
605 *
606 */
607static void
608qla8044_poll_list(struct scsi_qla_host *vha,
609 struct qla8044_reset_entry_hdr *p_hdr)
610{
611 long delay;
612 struct qla8044_entry *p_entry;
613 struct qla8044_poll *p_poll;
614 uint32_t i;
615 uint32_t value;
616
617 p_poll = (struct qla8044_poll *)
618 ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
619
620 /* Entries start after 8 byte qla8044_poll, poll header contains
621 * the test_mask, test_value.
622 */
623 p_entry = (struct qla8044_entry *)((char *)p_poll +
624 sizeof(struct qla8044_poll));
625
626 delay = (long)p_hdr->delay;
627
628 if (!delay) {
629 for (i = 0; i < p_hdr->count; i++, p_entry++)
630 qla8044_poll_reg(vha, p_entry->arg1,
631 delay, p_poll->test_mask, p_poll->test_value);
632 } else {
633 for (i = 0; i < p_hdr->count; i++, p_entry++) {
634 if (delay) {
635 if (qla8044_poll_reg(vha,
636 p_entry->arg1, delay,
637 p_poll->test_mask,
638 p_poll->test_value)) {
639 /*If
640 * (data_read&test_mask != test_value)
641 * read TIMEOUT_ADDR (arg1) and
642 * ADDR (arg2) registers
643 */
644 qla8044_rd_reg_indirect(vha,
645 p_entry->arg1, &value);
646 qla8044_rd_reg_indirect(vha,
647 p_entry->arg2, &value);
648 }
649 }
650 }
651 }
652}
653
654/*
655 * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr,
656 * read ar_addr, if (value& test_mask != test_mask) re-read till timeout
657 * expires.
658 *
659 * @vha : Pointer to adapter structure
660 * @p_hdr : reset entry header for POLL_WRITE_LIST opcode.
661 *
662 */
663static void
664qla8044_poll_write_list(struct scsi_qla_host *vha,
665 struct qla8044_reset_entry_hdr *p_hdr)
666{
667 long delay;
668 struct qla8044_quad_entry *p_entry;
669 struct qla8044_poll *p_poll;
670 uint32_t i;
671
672 p_poll = (struct qla8044_poll *)((char *)p_hdr +
673 sizeof(struct qla8044_reset_entry_hdr));
674
675 p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
676 sizeof(struct qla8044_poll));
677
678 delay = (long)p_hdr->delay;
679
680 for (i = 0; i < p_hdr->count; i++, p_entry++) {
681 qla8044_wr_reg_indirect(vha,
682 p_entry->dr_addr, p_entry->dr_value);
683 qla8044_wr_reg_indirect(vha,
684 p_entry->ar_addr, p_entry->ar_value);
685 if (delay) {
686 if (qla8044_poll_reg(vha,
687 p_entry->ar_addr, delay,
688 p_poll->test_mask,
689 p_poll->test_value)) {
690 ql_dbg(ql_dbg_p3p, vha, 0xb091,
691 "%s: Timeout Error: poll list, ",
692 __func__);
693 ql_dbg(ql_dbg_p3p, vha, 0xb092,
694 "item_num %d, entry_num %d\n", i,
695 vha->reset_tmplt.seq_index);
696 }
697 }
698 }
699}
700
701/*
702 * qla8044_read_modify_write - Read value from p_entry->arg1, modify the
703 * value, write value to p_entry->arg2. Process entries with p_hdr->delay
704 * between entries.
705 *
706 * @vha : Pointer to adapter structure
707 * @p_hdr : header with shift/or/xor values.
708 *
709 */
710static void
711qla8044_read_modify_write(struct scsi_qla_host *vha,
712 struct qla8044_reset_entry_hdr *p_hdr)
713{
714 struct qla8044_entry *p_entry;
715 struct qla8044_rmw *p_rmw_hdr;
716 uint32_t i;
717
718 p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
719 sizeof(struct qla8044_reset_entry_hdr));
720
721 p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
722 sizeof(struct qla8044_rmw));
723
724 for (i = 0; i < p_hdr->count; i++, p_entry++) {
725 qla8044_rmw_crb_reg(vha, p_entry->arg1,
726 p_entry->arg2, p_rmw_hdr);
727 if (p_hdr->delay)
728 udelay((uint32_t)(p_hdr->delay));
729 }
730}
731
732/*
733 * qla8044_pause - Wait for p_hdr->delay msecs, called between processing
734 * two entries of a sequence.
735 *
736 * @vha : Pointer to adapter structure
737 * @p_hdr : Common reset entry header.
738 *
739 */
740static
741void qla8044_pause(struct scsi_qla_host *vha,
742 struct qla8044_reset_entry_hdr *p_hdr)
743{
744 if (p_hdr->delay)
745 mdelay((uint32_t)((long)p_hdr->delay));
746}
747
748/*
749 * qla8044_template_end - Indicates end of reset sequence processing.
750 *
751 * @vha : Pointer to adapter structure
752 * @p_hdr : Common reset entry header.
753 *
754 */
755static void
756qla8044_template_end(struct scsi_qla_host *vha,
757 struct qla8044_reset_entry_hdr *p_hdr)
758{
759 vha->reset_tmplt.template_end = 1;
760
761 if (vha->reset_tmplt.seq_error == 0) {
762 ql_dbg(ql_dbg_p3p, vha, 0xb093,
763 "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
764 } else {
765 ql_log(ql_log_fatal, vha, 0xb094,
766 "%s: Reset sequence completed with some timeout "
767 "errors.\n", __func__);
768 }
769}
770
/*
 * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr,
 * if (value & test_mask != test_value) re-read till timeout value expires,
 * read dr_addr register and assign to reset_tmplt.array.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : Common reset entry header.
 *
 */
static void
qla8044_poll_read_list(struct scsi_qla_host *vha,
    struct qla8044_reset_entry_hdr *p_hdr)
{
    long delay;
    int index;
    struct qla8044_quad_entry *p_entry;
    struct qla8044_poll *p_poll;
    uint32_t i;
    uint32_t value;

    /* Poll header (test_mask/test_value) follows the entry header. */
    p_poll = (struct qla8044_poll *)
        ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));

    /* Quad entries (dr/ar address-value pairs) follow the poll header. */
    p_entry = (struct qla8044_quad_entry *)
        ((char *)p_poll + sizeof(struct qla8044_poll));

    delay = (long)p_hdr->delay;

    for (i = 0; i < p_hdr->count; i++, p_entry++) {
        /* Trigger the read by writing ar_value to ar_addr. */
        qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
            p_entry->ar_value);
        if (delay) {
            if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
                p_poll->test_mask, p_poll->test_value)) {
                ql_dbg(ql_dbg_p3p, vha, 0xb095,
                    "%s: Timeout Error: poll "
                    "list, ", __func__);
                ql_dbg(ql_dbg_p3p, vha, 0xb096,
                    "Item_num %d, "
                    "entry_num %d\n", i,
                    vha->reset_tmplt.seq_index);
            } else {
                /* Poll succeeded: capture dr_addr's value into
                 * the template array (consumed later by RMW
                 * entries via index_a). */
                /* NOTE(review): array_index is only written
                 * back when the array wraps, so successive
                 * values land in the same slot between calls —
                 * looks intentional for single-slot templates;
                 * confirm against the reset-template spec. */
                index = vha->reset_tmplt.array_index;
                qla8044_rd_reg_indirect(vha,
                    p_entry->dr_addr, &value);
                vha->reset_tmplt.array[index++] = value;
                if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
                    vha->reset_tmplt.array_index = 1;
            }
        }
    }
}
823
/*
 * qla8044_process_reset_template - Process all entries in reset template
 * till entry with SEQ_END opcode, which indicates end of the reset template
 * processing. Each entry has a Reset Entry header, entry opcode/command, with
 * size of the entry, number of entries in sub-sequence and delay in microsecs
 * or timeout in millisecs.
 *
 * @vha : Pointer to adapter structure
 * @p_buff : Start of the (sub-)sequence to process.
 */
static void
qla8044_process_reset_template(struct scsi_qla_host *vha,
    char *p_buff)
{
    int index, entries;
    struct qla8044_reset_entry_hdr *p_hdr;
    char *p_entry = p_buff;

    /* seq_end / template_end are set by the SEQ_END / TMPL_END opcode
     * handlers below; callers check them to detect truncated sequences. */
    vha->reset_tmplt.seq_end = 0;
    vha->reset_tmplt.template_end = 0;
    entries = vha->reset_tmplt.hdr->entries;
    /* Resume from wherever the previous sub-sequence stopped. */
    index = vha->reset_tmplt.seq_index;

    for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) {
        p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
        /* Dispatch on the entry's opcode. */
        switch (p_hdr->cmd) {
        case OPCODE_NOP:
            break;
        case OPCODE_WRITE_LIST:
            qla8044_write_list(vha, p_hdr);
            break;
        case OPCODE_READ_WRITE_LIST:
            qla8044_read_write_list(vha, p_hdr);
            break;
        case OPCODE_POLL_LIST:
            qla8044_poll_list(vha, p_hdr);
            break;
        case OPCODE_POLL_WRITE_LIST:
            qla8044_poll_write_list(vha, p_hdr);
            break;
        case OPCODE_READ_MODIFY_WRITE:
            qla8044_read_modify_write(vha, p_hdr);
            break;
        case OPCODE_SEQ_PAUSE:
            qla8044_pause(vha, p_hdr);
            break;
        case OPCODE_SEQ_END:
            vha->reset_tmplt.seq_end = 1;
            break;
        case OPCODE_TMPL_END:
            qla8044_template_end(vha, p_hdr);
            break;
        case OPCODE_POLL_READ_LIST:
            qla8044_poll_read_list(vha, p_hdr);
            break;
        default:
            ql_log(ql_log_fatal, vha, 0xb097,
                "%s: Unknown command ==> 0x%04x on "
                "entry = %d\n", __func__, p_hdr->cmd, index);
            break;
        }
        /*
         * Set pointer to next entry in the sequence, using the size
         * recorded in each entry's header.
         */
        p_entry += p_hdr->size;
    }
    /* Remember where we stopped so the next sub-sequence can resume. */
    vha->reset_tmplt.seq_index = index;
}
893
894static void
895qla8044_process_init_seq(struct scsi_qla_host *vha)
896{
897 qla8044_process_reset_template(vha,
898 vha->reset_tmplt.init_offset);
899 if (vha->reset_tmplt.seq_end != 1)
900 ql_log(ql_log_fatal, vha, 0xb098,
901 "%s: Abrupt INIT Sub-Sequence end.\n",
902 __func__);
903}
904
905static void
906qla8044_process_stop_seq(struct scsi_qla_host *vha)
907{
908 vha->reset_tmplt.seq_index = 0;
909 qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
910 if (vha->reset_tmplt.seq_end != 1)
911 ql_log(ql_log_fatal, vha, 0xb099,
912 "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
913}
914
915static void
916qla8044_process_start_seq(struct scsi_qla_host *vha)
917{
918 qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
919 if (vha->reset_tmplt.template_end != 1)
920 ql_log(ql_log_fatal, vha, 0xb09a,
921 "%s: Abrupt START Sub-Sequence end.\n",
922 __func__);
923}
924
/*
 * qla8044_lockless_flash_read_u32 - Read u32 words from flash through the
 * direct data window WITHOUT taking the flash hardware lock (caller must
 * guarantee exclusive access, e.g. during bootloader copy).
 *
 * @vha : Pointer to adapter structure
 * @flash_addr : Byte address into flash; must be 4-byte aligned
 * @p_data : Destination buffer (u32_word_count * 4 bytes)
 * @u32_word_count : Number of 32-bit words to read
 *
 * Return Value - QLA_SUCCESS / QLA_FUNCTION_FAILED
 */
static int
qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
    uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
{
    uint32_t i;
    uint32_t u32_word;
    uint32_t flash_offset;
    uint32_t addr = flash_addr;
    int ret_val = QLA_SUCCESS;

    /* Offset of the starting address within its flash sector. */
    flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);

    /* Direct window accesses are 32-bit; address must be word aligned. */
    if (addr & 0x3) {
        ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
            __func__, addr);
        ret_val = QLA_FUNCTION_FAILED;
        goto exit_lockless_read;
    }

    /* Map the direct window onto the starting address. */
    ret_val = qla8044_wr_reg_indirect(vha,
        QLA8044_FLASH_DIRECT_WINDOW, (addr));

    if (ret_val != QLA_SUCCESS) {
        ql_log(ql_log_fatal, vha, 0xb09c,
            "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
            __func__, addr);
        goto exit_lockless_read;
    }

    /* Check if data is spread across multiple sectors */
    if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
        (QLA8044_FLASH_SECTOR_SIZE - 1)) {
        /* Multi sector read: the window register must be re-written
         * each time the read position crosses a sector boundary. */
        for (i = 0; i < u32_word_count; i++) {
            ret_val = qla8044_rd_reg_indirect(vha,
                QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
            if (ret_val != QLA_SUCCESS) {
                ql_log(ql_log_fatal, vha, 0xb09d,
                    "%s: failed to read addr 0x%x!\n",
                    __func__, addr);
                goto exit_lockless_read;
            }
            *(uint32_t *)p_data = u32_word;
            p_data = p_data + 4;
            addr = addr + 4;
            flash_offset = flash_offset + 4;
            if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
                /* This write is needed once for each sector */
                ret_val = qla8044_wr_reg_indirect(vha,
                    QLA8044_FLASH_DIRECT_WINDOW, (addr));
                if (ret_val != QLA_SUCCESS) {
                    ql_log(ql_log_fatal, vha, 0xb09f,
                        "%s: failed to write addr "
                        "0x%x to FLASH_DIRECT_WINDOW!\n",
                        __func__, addr);
                    goto exit_lockless_read;
                }
                flash_offset = 0;
            }
        }
    } else {
        /* Single sector read: one window mapping suffices. */
        for (i = 0; i < u32_word_count; i++) {
            ret_val = qla8044_rd_reg_indirect(vha,
                QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
            if (ret_val != QLA_SUCCESS) {
                ql_log(ql_log_fatal, vha, 0xb0a0,
                    "%s: failed to read addr 0x%x!\n",
                    __func__, addr);
                goto exit_lockless_read;
            }
            *(uint32_t *)p_data = u32_word;
            p_data = p_data + 4;
            addr = addr + 4;
        }
    }

exit_lockless_read:
    return ret_val;
}
1005
1006/*
1007 * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory
1008 *
1009 * @vha : Pointer to adapter structure
1010 * addr : Flash address to write to
1011 * data : Data to be written
1012 * count : word_count to be written
1013 *
1014 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1015 */
1016static int
1017qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
1018 uint64_t addr, uint32_t *data, uint32_t count)
1019{
1020 int i, j, ret_val = QLA_SUCCESS;
1021 uint32_t agt_ctrl;
1022 unsigned long flags;
1023 struct qla_hw_data *ha = vha->hw;
1024
1025 /* Only 128-bit aligned access */
1026 if (addr & 0xF) {
1027 ret_val = QLA_FUNCTION_FAILED;
1028 goto exit_ms_mem_write;
1029 }
1030 write_lock_irqsave(&ha->hw_lock, flags);
1031
1032 /* Write address */
1033 ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
1034 if (ret_val == QLA_FUNCTION_FAILED) {
1035 ql_log(ql_log_fatal, vha, 0xb0a1,
1036 "%s: write to AGT_ADDR_HI failed!\n", __func__);
1037 goto exit_ms_mem_write_unlock;
1038 }
1039
1040 for (i = 0; i < count; i++, addr += 16) {
1041 if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET,
1042 QLA8044_ADDR_QDR_NET_MAX)) ||
1043 (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET,
1044 QLA8044_ADDR_DDR_NET_MAX)))) {
1045 ret_val = QLA_FUNCTION_FAILED;
1046 goto exit_ms_mem_write_unlock;
1047 }
1048
1049 ret_val = qla8044_wr_reg_indirect(vha,
1050 MD_MIU_TEST_AGT_ADDR_LO, addr);
1051
1052 /* Write data */
1053 ret_val += qla8044_wr_reg_indirect(vha,
1054 MD_MIU_TEST_AGT_WRDATA_LO, *data++);
1055 ret_val += qla8044_wr_reg_indirect(vha,
1056 MD_MIU_TEST_AGT_WRDATA_HI, *data++);
1057 ret_val += qla8044_wr_reg_indirect(vha,
1058 MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
1059 ret_val += qla8044_wr_reg_indirect(vha,
1060 MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
1061 if (ret_val == QLA_FUNCTION_FAILED) {
1062 ql_log(ql_log_fatal, vha, 0xb0a2,
1063 "%s: write to AGT_WRDATA failed!\n",
1064 __func__);
1065 goto exit_ms_mem_write_unlock;
1066 }
1067
1068 /* Check write status */
1069 ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
1070 MIU_TA_CTL_WRITE_ENABLE);
1071 ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
1072 MIU_TA_CTL_WRITE_START);
1073 if (ret_val == QLA_FUNCTION_FAILED) {
1074 ql_log(ql_log_fatal, vha, 0xb0a3,
1075 "%s: write to AGT_CTRL failed!\n", __func__);
1076 goto exit_ms_mem_write_unlock;
1077 }
1078
1079 for (j = 0; j < MAX_CTL_CHECK; j++) {
1080 ret_val = qla8044_rd_reg_indirect(vha,
1081 MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
1082 if (ret_val == QLA_FUNCTION_FAILED) {
1083 ql_log(ql_log_fatal, vha, 0xb0a4,
1084 "%s: failed to read "
1085 "MD_MIU_TEST_AGT_CTRL!\n", __func__);
1086 goto exit_ms_mem_write_unlock;
1087 }
1088 if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
1089 break;
1090 }
1091
1092 /* Status check failed */
1093 if (j >= MAX_CTL_CHECK) {
1094 ql_log(ql_log_fatal, vha, 0xb0a5,
1095 "%s: MS memory write failed!\n",
1096 __func__);
1097 ret_val = QLA_FUNCTION_FAILED;
1098 goto exit_ms_mem_write_unlock;
1099 }
1100 }
1101
1102exit_ms_mem_write_unlock:
1103 write_unlock_irqrestore(&ha->hw_lock, flags);
1104
1105exit_ms_mem_write:
1106 return ret_val;
1107}
1108
/*
 * qla8044_copy_bootloader - Copy the bootloader image from flash into
 * MS/off-chip memory via a vmalloc'd staging buffer.
 *
 * Source address is fixed (QLA8044_BOOTLOADER_FLASH_ADDR); destination
 * address and size are read from the BOOTLOADER_ADDR/SIZE registers.
 *
 * @vha : Pointer to adapter structure
 *
 * Returns QLA_SUCCESS / QLA_FUNCTION_FAILED. The staging buffer is owned
 * and freed here.
 */
static int
qla8044_copy_bootloader(struct scsi_qla_host *vha)
{
    uint8_t *p_cache;
    uint32_t src, count, size;
    uint64_t dest;
    int ret_val = QLA_SUCCESS;
    struct qla_hw_data *ha = vha->hw;

    src = QLA8044_BOOTLOADER_FLASH_ADDR;
    dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
    size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);

    /* 128 bit alignment check: round size up to the next 16-byte
     * multiple (the guard guarantees size & 0xF != 0, so +16 then
     * masking rounds up, never down). */
    if (size & 0xF)
        size = (size + 16) & ~0xF;

    /* 16 byte count — number of 128-bit chunks for the MS write. */
    count = size/16;

    p_cache = vmalloc(size);
    if (p_cache == NULL) {
        ql_log(ql_log_fatal, vha, 0xb0a6,
            "%s: Failed to allocate memory for "
            "boot loader cache\n", __func__);
        ret_val = QLA_FUNCTION_FAILED;
        goto exit_copy_bootloader;
    }

    /* Lockless read is safe here: caller context has exclusive flash
     * access during firmware restart. */
    ret_val = qla8044_lockless_flash_read_u32(vha, src,
        p_cache, size/sizeof(uint32_t));
    if (ret_val == QLA_FUNCTION_FAILED) {
        ql_log(ql_log_fatal, vha, 0xb0a7,
            "%s: Error reading F/W from flash!!!\n", __func__);
        goto exit_copy_error;
    }
    ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
        __func__);

    /* 128 bit/16 byte write to MS memory */
    ret_val = qla8044_ms_mem_write_128b(vha, dest,
        (uint32_t *)p_cache, count);
    if (ret_val == QLA_FUNCTION_FAILED) {
        ql_log(ql_log_fatal, vha, 0xb0a9,
            "%s: Error writing F/W to MS !!!\n", __func__);
        goto exit_copy_error;
    }
    ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
        "%s: Wrote F/W (size %d) to MS !!!\n",
        __func__, size);

exit_copy_error:
    vfree(p_cache);

exit_copy_bootloader:
    return ret_val;
}
1166
1167static int
1168qla8044_restart(struct scsi_qla_host *vha)
1169{
1170 int ret_val = QLA_SUCCESS;
1171 struct qla_hw_data *ha = vha->hw;
1172
1173 qla8044_process_stop_seq(vha);
1174
1175 /* Collect minidump */
1176 if (ql2xmdenable)
1177 qla8044_get_minidump(vha);
1178 else
1179 ql_log(ql_log_fatal, vha, 0xb14c,
1180 "Minidump disabled.\n");
1181
1182 qla8044_process_init_seq(vha);
1183
1184 if (qla8044_copy_bootloader(vha)) {
1185 ql_log(ql_log_fatal, vha, 0xb0ab,
1186 "%s: Copy bootloader, firmware restart failed!\n",
1187 __func__);
1188 ret_val = QLA_FUNCTION_FAILED;
1189 goto exit_restart;
1190 }
1191
1192 /*
1193 * Loads F/W from flash
1194 */
1195 qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);
1196
1197 qla8044_process_start_seq(vha);
1198
1199exit_restart:
1200 return ret_val;
1201}
1202
1203/*
1204 * qla8044_check_cmd_peg_status - Check peg status to see if Peg is
1205 * initialized.
1206 *
1207 * @ha : Pointer to adapter structure
1208 *
1209 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1210 */
1211static int
1212qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
1213{
1214 uint32_t val, ret_val = QLA_FUNCTION_FAILED;
1215 int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
1216 struct qla_hw_data *ha = vha->hw;
1217
1218 do {
1219 val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
1220 if (val == PHAN_INITIALIZE_COMPLETE) {
1221 ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
1222 "%s: Command Peg initialization "
1223 "complete! state=0x%x\n", __func__, val);
1224 ret_val = QLA_SUCCESS;
1225 break;
1226 }
1227 msleep(CRB_CMDPEG_CHECK_DELAY);
1228 } while (--retries);
1229
1230 return ret_val;
1231}
1232
1233static int
1234qla8044_start_firmware(struct scsi_qla_host *vha)
1235{
1236 int ret_val = QLA_SUCCESS;
1237
1238 if (qla8044_restart(vha)) {
1239 ql_log(ql_log_fatal, vha, 0xb0ad,
1240 "%s: Restart Error!!!, Need Reset!!!\n",
1241 __func__);
1242 ret_val = QLA_FUNCTION_FAILED;
1243 goto exit_start_fw;
1244 } else
1245 ql_dbg(ql_dbg_p3p, vha, 0xb0af,
1246 "%s: Restart done!\n", __func__);
1247
1248 ret_val = qla8044_check_cmd_peg_status(vha);
1249 if (ret_val) {
1250 ql_log(ql_log_fatal, vha, 0xb0b0,
1251 "%s: Peg not initialized!\n", __func__);
1252 ret_val = QLA_FUNCTION_FAILED;
1253 }
1254
1255exit_start_fw:
1256 return ret_val;
1257}
1258
1259void
1260qla8044_clear_drv_active(struct scsi_qla_host *vha)
1261{
1262 uint32_t drv_active;
1263 struct qla_hw_data *ha = vha->hw;
1264
1265 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1266 drv_active &= ~(1 << (ha->portnum));
1267
1268 ql_log(ql_log_info, vha, 0xb0b1,
1269 "%s(%ld): drv_active: 0x%08x\n",
1270 __func__, vha->host_no, drv_active);
1271
1272 qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1273}
1274
/*
 * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw
 * @vha: pointer to adapter structure
 *
 * If no reset is needed and the Peg-alive counter is advancing, the
 * device is declared READY without a restart. Otherwise the firmware is
 * restarted (with the IDC lock dropped for the duration) and the IDC
 * device state is driven INITIALIZING -> READY, or FAILED on error.
 *
 * Note: IDC lock must be held upon entry
 **/
static int
qla8044_device_bootstrap(struct scsi_qla_host *vha)
{
    int rval = QLA_FUNCTION_FAILED;
    int i;
    uint32_t old_count = 0, count = 0;
    int need_reset = 0;
    uint32_t idc_ctrl;
    struct qla_hw_data *ha = vha->hw;

    need_reset = qla8044_need_reset(vha);

    if (!need_reset) {
        /* No reset requested: watch the Peg-alive counter for up to
         * 2s; if it advances, the firmware is healthy as-is. */
        old_count = qla8044_rd_direct(vha,
            QLA8044_PEG_ALIVE_COUNTER_INDEX);

        for (i = 0; i < 10; i++) {
            msleep(200);

            count = qla8044_rd_direct(vha,
                QLA8044_PEG_ALIVE_COUNTER_INDEX);
            if (count != old_count) {
                rval = QLA_SUCCESS;
                goto dev_ready;
            }
        }
        /* Counter stuck: firmware is dead, break any stale flash
         * lock before restarting. */
        qla8044_flash_lock_recovery(vha);
    } else {
        /* We are trying to perform a recovery here. */
        if (ha->flags.isp82xx_fw_hung)
            qla8044_flash_lock_recovery(vha);
    }

    /* set to DEV_INITIALIZING */
    ql_log(ql_log_info, vha, 0xb0b2,
        "%s: HW State: INITIALIZING\n", __func__);
    qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
        QLA8XXX_DEV_INITIALIZING);

    /* Drop the IDC lock across the (slow) firmware restart. */
    qla8044_idc_unlock(ha);
    rval = qla8044_start_firmware(vha);
    qla8044_idc_lock(ha);

    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_info, vha, 0xb0b3,
            "%s: HW State: FAILED\n", __func__);
        qla8044_clear_drv_active(vha);
        qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
            QLA8XXX_DEV_FAILED);
        return rval;
    }

    /* For ISP8044, If IDC_CTRL GRACEFUL_RESET_BIT1 is set , reset it after
     * device goes to INIT state. */
    idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
    if (idc_ctrl & GRACEFUL_RESET_BIT1) {
        qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
            (idc_ctrl & ~GRACEFUL_RESET_BIT1));
        ha->fw_dumped = 0;
    }

dev_ready:
    ql_log(ql_log_info, vha, 0xb0b4,
        "%s: HW State: READY\n", __func__);
    qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);

    return rval;
}
1349
1350/*-------------------------Reset Sequence Functions-----------------------*/
1351static void
1352qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
1353{
1354 u8 *phdr;
1355
1356 if (!vha->reset_tmplt.buff) {
1357 ql_log(ql_log_fatal, vha, 0xb0b5,
1358 "%s: Error Invalid reset_seq_template\n", __func__);
1359 return;
1360 }
1361
1362 phdr = vha->reset_tmplt.buff;
1363 ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
1364 "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X"
1365 "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
1366 "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
1367 *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
1368 *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
1369 *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
1370 *(phdr+13), *(phdr+14), *(phdr+15));
1371}
1372
1373/*
1374 * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
1375 *
1376 * @ha : Pointer to adapter structure
1377 *
1378 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1379 */
static int
qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
{
	uint32_t sum = 0;
	uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
	int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t);

	/* Sum the template as 16-bit words into a 32-bit accumulator. */
	while (u16_count-- > 0)
		sum += *buff++;

	/* Fold carries back into the low 16 bits (ones'-complement style). */
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);

	/* checksum of 0 indicates a valid template */
	/* NOTE(review): after folding, sum <= 0xFFFF, so ~sum evaluated as
	 * a 32-bit value is always non-zero and this test can never report
	 * failure. The intent was presumably (uint16_t)~sum == 0 or
	 * sum == 0 - TODO confirm against the template checksum spec. */
	if (~sum) {
		return QLA_SUCCESS;
	} else {
		ql_log(ql_log_fatal, vha, 0xb0b7,
		    "%s: Reset seq checksum failed\n", __func__);
		return QLA_FUNCTION_FAILED;
	}
}
1402
1403/*
1404 * qla8044_read_reset_template - Read Reset Template from Flash, validate
1405 * the template and store offsets of stop/start/init offsets in ha->reset_tmplt.
1406 *
1407 * @ha : Pointer to adapter structure
1408 */
1409void
1410qla8044_read_reset_template(struct scsi_qla_host *vha)
1411{
1412 uint8_t *p_buff;
1413 uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
1414
1415 vha->reset_tmplt.seq_error = 0;
1416 vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
1417 if (vha->reset_tmplt.buff == NULL) {
1418 ql_log(ql_log_fatal, vha, 0xb0b8,
1419 "%s: Failed to allocate reset template resources\n",
1420 __func__);
1421 goto exit_read_reset_template;
1422 }
1423
1424 p_buff = vha->reset_tmplt.buff;
1425 addr = QLA8044_RESET_TEMPLATE_ADDR;
1426
1427 tmplt_hdr_def_size =
1428 sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
1429
1430 ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
1431 "%s: Read template hdr size %d from Flash\n",
1432 __func__, tmplt_hdr_def_size);
1433
1434 /* Copy template header from flash */
1435 if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
1436 ql_log(ql_log_fatal, vha, 0xb0ba,
1437 "%s: Failed to read reset template\n", __func__);
1438 goto exit_read_template_error;
1439 }
1440
1441 vha->reset_tmplt.hdr =
1442 (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
1443
1444 /* Validate the template header size and signature */
1445 tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
1446 if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
1447 (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
1448 ql_log(ql_log_fatal, vha, 0xb0bb,
1449 "%s: Template Header size invalid %d "
1450 "tmplt_hdr_def_size %d!!!\n", __func__,
1451 tmplt_hdr_size, tmplt_hdr_def_size);
1452 goto exit_read_template_error;
1453 }
1454
1455 addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
1456 p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
1457 tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
1458 vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
1459
1460 ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
1461 "%s: Read rest of the template size %d\n",
1462 __func__, vha->reset_tmplt.hdr->size);
1463
1464 /* Copy rest of the template */
1465 if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
1466 ql_log(ql_log_fatal, vha, 0xb0bd,
1467 "%s: Failed to read reset tempelate\n", __func__);
1468 goto exit_read_template_error;
1469 }
1470
1471 /* Integrity check */
1472 if (qla8044_reset_seq_checksum_test(vha)) {
1473 ql_log(ql_log_fatal, vha, 0xb0be,
1474 "%s: Reset Seq checksum failed!\n", __func__);
1475 goto exit_read_template_error;
1476 }
1477
1478 ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
1479 "%s: Reset Seq checksum passed! Get stop, "
1480 "start and init seq offsets\n", __func__);
1481
1482 /* Get STOP, START, INIT sequence offsets */
1483 vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
1484 vha->reset_tmplt.hdr->init_seq_offset;
1485
1486 vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
1487 vha->reset_tmplt.hdr->start_seq_offset;
1488
1489 vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
1490 vha->reset_tmplt.hdr->hdr_size;
1491
1492 qla8044_dump_reset_seq_hdr(vha);
1493
1494 goto exit_read_reset_template;
1495
1496exit_read_template_error:
1497 vfree(vha->reset_tmplt.buff);
1498
1499exit_read_reset_template:
1500 return;
1501}
1502
1503void
1504qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
1505{
1506 uint32_t idc_ctrl;
1507 struct qla_hw_data *ha = vha->hw;
1508
1509 idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1510 idc_ctrl |= DONTRESET_BIT0;
1511 ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
1512 "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
1513 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
1514}
1515
/* Acknowledge a pending reset by setting this function's bit in the
 * DRV_STATE register. */
inline void
qla8044_set_rst_ready(struct scsi_qla_host *vha)
{
	uint32_t drv_state;
	struct qla_hw_data *ha = vha->hw;

	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);

	/* For ISP8044, drv_state register has 1 bit per function,
	 * shift 1 by func_num to set the reset-ack bit for this function. */
	drv_state |= (1 << ha->portnum);

	ql_log(ql_log_info, vha, 0xb0c1,
	    "%s(%ld): drv_state: 0x%08x\n",
	    __func__, vha->host_no, drv_state);
	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
}
1533
1534/**
1535 * qla8044_need_reset_handler - Code to start reset sequence
1536 * @ha: pointer to adapter structure
1537 *
1538 * Note: IDC lock must be held upon entry
1539 **/
static void
qla8044_need_reset_handler(struct scsi_qla_host *vha)
{
	uint32_t dev_state = 0, drv_state, drv_active;
	unsigned long reset_timeout, dev_init_timeout;
	struct qla_hw_data *ha = vha->hw;

	ql_log(ql_log_fatal, vha, 0xb0c2,
	    "%s: Performing ISP error recovery\n", __func__);

	if (vha->flags.online) {
		/* Quiesce the ISP and refresh flash/NVRAM state; these
		 * helpers may sleep, so drop the IDC lock around them. */
		qla8044_idc_unlock(ha);
		qla2x00_abort_isp_cleanup(vha);
		ha->isp_ops->get_flash_version(vha, vha->req->ring);
		ha->isp_ops->nvram_config(vha);
		qla8044_idc_lock(ha);
	}

	if (!ha->flags.nic_core_reset_owner) {
		ql_dbg(ql_dbg_p3p, vha, 0xb0c3,
		    "%s(%ld): reset acknowledged\n",
		    __func__, vha->host_no);
		qla8044_set_rst_ready(vha);

		/* Non-reset owners ACK Reset and wait for device INIT state
		 * as part of Reset Recovery by Reset Owner
		 */
		dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);

		/* Poll DEV_STATE once per second (lock released while
		 * sleeping) until the owner moves past NEED_RESET. */
		do {
			if (time_after_eq(jiffies, dev_init_timeout)) {
				ql_log(ql_log_info, vha, 0xb0c4,
				    "%s: Non Reset owner DEV INIT "
				    "TIMEOUT!\n", __func__);
				break;
			}

			qla8044_idc_unlock(ha);
			msleep(1000);
			qla8044_idc_lock(ha);

			dev_state = qla8044_rd_direct(vha,
			    QLA8044_CRB_DEV_STATE_INDEX);
		} while (dev_state == QLA8XXX_DEV_NEED_RESET);
	} else {
		qla8044_set_rst_ready(vha);

		/* wait for 10 seconds for reset ack from all functions */
		reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);

		drv_state = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_STATE_INDEX);
		drv_active = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);

		ql_log(ql_log_info, vha, 0xb0c5,
		    "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
		    __func__, vha->host_no, drv_state, drv_active);

		/* Reset owner: wait until every active function has set
		 * its reset-ack bit (drv_state mirrors drv_active). */
		while (drv_state != drv_active) {
			if (time_after_eq(jiffies, reset_timeout)) {
				ql_log(ql_log_info, vha, 0xb0c6,
				    "%s: RESET TIMEOUT!"
				    "drv_state: 0x%08x, drv_active: 0x%08x\n",
				    QLA2XXX_DRIVER_NAME, drv_state, drv_active);
				break;
			}

			qla8044_idc_unlock(ha);
			msleep(1000);
			qla8044_idc_lock(ha);

			drv_state = qla8044_rd_direct(vha,
			    QLA8044_CRB_DRV_STATE_INDEX);
			drv_active = qla8044_rd_direct(vha,
			    QLA8044_CRB_DRV_ACTIVE_INDEX);
		}

		/* Any function that never ACKed is forcibly removed from
		 * drv_active so recovery can proceed without it. */
		if (drv_state != drv_active) {
			ql_log(ql_log_info, vha, 0xb0c7,
			    "%s(%ld): Reset_owner turning off drv_active "
			    "of non-acking function 0x%x\n", __func__,
			    vha->host_no, (drv_active ^ drv_state));
			drv_active = drv_active & drv_state;
			qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
			    drv_active);
		}

		/*
		 * Clear RESET OWNER, will be set at next reset
		 * by next RST_OWNER
		 */
		ha->flags.nic_core_reset_owner = 0;

		/* Start Reset Recovery */
		qla8044_device_bootstrap(vha);
	}
}
1638
1639static void
1640qla8044_set_drv_active(struct scsi_qla_host *vha)
1641{
1642 uint32_t drv_active;
1643 struct qla_hw_data *ha = vha->hw;
1644
1645 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1646
1647 /* For ISP8044, drv_active register has 1 bit per function,
1648 * shift 1 by func_num to set a bit for the function.*/
1649 drv_active |= (1 << ha->portnum);
1650
1651 ql_log(ql_log_info, vha, 0xb0c8,
1652 "%s(%ld): drv_active: 0x%08x\n",
1653 __func__, vha->host_no, drv_active);
1654 qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1655}
1656
1657static void
1658qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
1659{
1660 uint32_t idc_ctrl;
1661 struct qla_hw_data *ha = vha->hw;
1662
1663 idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1664 idc_ctrl &= ~DONTRESET_BIT0;
1665 ql_log(ql_log_info, vha, 0xb0c9,
1666 "%s: idc_ctrl = %d\n", __func__,
1667 idc_ctrl);
1668 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
1669}
1670
1671static int
1672qla8044_set_idc_ver(struct scsi_qla_host *vha)
1673{
1674 int idc_ver;
1675 uint32_t drv_active;
1676 int rval = QLA_SUCCESS;
1677 struct qla_hw_data *ha = vha->hw;
1678
1679 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1680 if (drv_active == (1 << ha->portnum)) {
1681 idc_ver = qla8044_rd_direct(vha,
1682 QLA8044_CRB_DRV_IDC_VERSION_INDEX);
1683 idc_ver &= (~0xFF);
1684 idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
1685 qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
1686 idc_ver);
1687 ql_log(ql_log_info, vha, 0xb0ca,
1688 "%s: IDC version updated to %d\n",
1689 __func__, idc_ver);
1690 } else {
1691 idc_ver = qla8044_rd_direct(vha,
1692 QLA8044_CRB_DRV_IDC_VERSION_INDEX);
1693 idc_ver &= 0xFF;
1694 if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
1695 ql_log(ql_log_info, vha, 0xb0cb,
1696 "%s: qla4xxx driver IDC version %d "
1697 "is not compatible with IDC version %d "
1698 "of other drivers!\n",
1699 __func__, QLA8044_IDC_VER_MAJ_VALUE,
1700 idc_ver);
1701 rval = QLA_FUNCTION_FAILED;
1702 goto exit_set_idc_ver;
1703 }
1704 }
1705
1706 /* Update IDC_MINOR_VERSION */
1707 idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
1708 idc_ver &= ~(0x03 << (ha->portnum * 2));
1709 idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
1710 qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
1711
1712exit_set_idc_ver:
1713 return rval;
1714}
1715
1716static int
1717qla8044_update_idc_reg(struct scsi_qla_host *vha)
1718{
1719 uint32_t drv_active;
1720 int rval = QLA_SUCCESS;
1721 struct qla_hw_data *ha = vha->hw;
1722
1723 if (vha->flags.init_done)
1724 goto exit_update_idc_reg;
1725
1726 qla8044_idc_lock(ha);
1727 qla8044_set_drv_active(vha);
1728
1729 drv_active = qla8044_rd_direct(vha,
1730 QLA8044_CRB_DRV_ACTIVE_INDEX);
1731
1732 /* If we are the first driver to load and
1733 * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */
1734 if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
1735 qla8044_clear_idc_dontreset(vha);
1736
1737 rval = qla8044_set_idc_ver(vha);
1738 if (rval == QLA_FUNCTION_FAILED)
1739 qla8044_clear_drv_active(vha);
1740 qla8044_idc_unlock(ha);
1741
1742exit_update_idc_reg:
1743 return rval;
1744}
1745
1746/**
1747 * qla8044_need_qsnt_handler - Code to start qsnt
1748 * @ha: pointer to adapter structure
1749 **/
static void
qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
{
	unsigned long qsnt_timeout;
	uint32_t drv_state, drv_active, dev_state;
	struct qla_hw_data *ha = vha->hw;

	/* Only an online host participates in quiescence. */
	if (vha->flags.online)
		qla2x00_quiesce_io(vha);
	else
		return;

	qla8044_set_qsnt_ready(vha);

	/* Wait for 30 secs for all functions to ack qsnt mode */
	qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);

	/* Shift drv_active by 1 to match drv_state. As quiescent ready bit
	   position is at bit 1 and drv active is at bit 0 */
	drv_active = drv_active << 1;

	/* Poll once per second (IDC lock dropped while sleeping) until
	 * every active function has set its quiescent-ready bit. */
	while (drv_state != drv_active) {
		if (time_after_eq(jiffies, qsnt_timeout)) {
			/* Other functions did not ack, changing state to
			 * DEV_READY
			 */
			clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
			    QLA8XXX_DEV_READY);
			qla8044_clear_qsnt_ready(vha);
			ql_log(ql_log_info, vha, 0xb0cc,
			    "Timeout waiting for quiescent ack!!!\n");
			return;
		}
		qla8044_idc_unlock(ha);
		msleep(1000);
		qla8044_idc_lock(ha);

		drv_state = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_STATE_INDEX);
		drv_active = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		drv_active = drv_active << 1;
	}

	/* All functions have Acked. Set quiescent state */
	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);

	/* Only transition if no other function already changed the state
	 * while we were polling. */
	if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
		    QLA8XXX_DEV_QUIESCENT);
		ql_log(ql_log_info, vha, 0xb0cd,
		    "%s: HW State: QUIESCENT\n", __func__);
	}
}
1807
1808/*
1809 * qla8044_device_state_handler - Adapter state machine
1810 * @ha: pointer to host adapter structure.
1811 *
1812 * Note: IDC lock must be UNLOCKED upon entry
1813 **/
int
qla8044_device_state_handler(struct scsi_qla_host *vha)
{
	uint32_t dev_state;
	int rval = QLA_SUCCESS;
	unsigned long dev_init_timeout;
	struct qla_hw_data *ha = vha->hw;

	rval = qla8044_update_idc_reg(vha);
	if (rval == QLA_FUNCTION_FAILED)
		goto exit_error;

	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
	ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
	    "Device state is 0x%x = %s\n",
	    dev_state, dev_state < MAX_STATES ?
	    qdev_state(dev_state) : "Unknown");

	/* wait for 30 seconds for device to go ready */
	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);

	qla8044_idc_lock(ha);

	while (1) {
		/* On timeout the device is forced to FAILED; no break here,
		 * so the next iteration reads FAILED and exits via the
		 * switch below. */
		if (time_after_eq(jiffies, dev_init_timeout)) {
			ql_log(ql_log_warn, vha, 0xb0cf,
			    "%s: Device Init Failed 0x%x = %s\n",
			    QLA2XXX_DRIVER_NAME, dev_state,
			    dev_state < MAX_STATES ?
			    qdev_state(dev_state) : "Unknown");

			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
			    QLA8XXX_DEV_FAILED);
		}

		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
		ql_log(ql_log_info, vha, 0xb0d0,
		    "Device state is 0x%x = %s\n",
		    dev_state, dev_state < MAX_STATES ?
		    qdev_state(dev_state) : "Unknown");

		/* NOTE: Make sure idc unlocked upon exit of switch statement */
		switch (dev_state) {
		case QLA8XXX_DEV_READY:
			ha->flags.nic_core_reset_owner = 0;
			goto exit;
		case QLA8XXX_DEV_COLD:
			/* Fresh device: this function performs bootstrap. */
			rval = qla8044_device_bootstrap(vha);
			goto exit;
		case QLA8XXX_DEV_INITIALIZING:
			/* Another function is initializing; wait it out. */
			qla8044_idc_unlock(ha);
			msleep(1000);
			qla8044_idc_lock(ha);
			break;
		case QLA8XXX_DEV_NEED_RESET:
			/* For ISP8044, if NEED_RESET is set by any driver,
			 * it should be honored, irrespective of IDC_CTRL
			 * DONTRESET_BIT0 */
			qla8044_need_reset_handler(vha);
			break;
		case QLA8XXX_DEV_NEED_QUIESCENT:
			/* idc locked/unlocked in handler */
			qla8044_need_qsnt_handler(vha);

			/* Reset the init timeout after qsnt handler */
			dev_init_timeout = jiffies +
			    (ha->fcoe_reset_timeout * HZ);
			break;
		case QLA8XXX_DEV_QUIESCENT:
			ql_log(ql_log_info, vha, 0xb0d1,
			    "HW State: QUIESCENT\n");

			qla8044_idc_unlock(ha);
			msleep(1000);
			qla8044_idc_lock(ha);

			/* Reset the init timeout after qsnt handler */
			dev_init_timeout = jiffies +
			    (ha->fcoe_reset_timeout * HZ);
			break;
		case QLA8XXX_DEV_FAILED:
			ha->flags.nic_core_reset_owner = 0;
			/* Failure handler may sleep; drop the lock first. */
			qla8044_idc_unlock(ha);
			qla8xxx_dev_failed_handler(vha);
			rval = QLA_FUNCTION_FAILED;
			qla8044_idc_lock(ha);
			goto exit;
		default:
			/* Unknown state is treated the same as FAILED. */
			qla8044_idc_unlock(ha);
			qla8xxx_dev_failed_handler(vha);
			rval = QLA_FUNCTION_FAILED;
			qla8044_idc_lock(ha);
			goto exit;
		}
	}
exit:
	qla8044_idc_unlock(ha);

exit_error:
	return rval;
}
1915
1916/**
1917 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
1918 * @ha: adapter block pointer.
1919 *
1920 * Note: The caller should not hold the idc lock.
1921 **/
1922static int
1923qla8044_check_temp(struct scsi_qla_host *vha)
1924{
1925 uint32_t temp, temp_state, temp_val;
1926 int status = QLA_SUCCESS;
1927
1928 temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
1929 temp_state = qla82xx_get_temp_state(temp);
1930 temp_val = qla82xx_get_temp_val(temp);
1931
1932 if (temp_state == QLA82XX_TEMP_PANIC) {
1933 ql_log(ql_log_warn, vha, 0xb0d2,
1934 "Device temperature %d degrees C"
1935 " exceeds maximum allowed. Hardware has been shut"
1936 " down\n", temp_val);
1937 status = QLA_FUNCTION_FAILED;
1938 return status;
1939 } else if (temp_state == QLA82XX_TEMP_WARN) {
1940 ql_log(ql_log_warn, vha, 0xb0d3,
1941 "Device temperature %d"
1942 " degrees C exceeds operating range."
1943 " Immediate action needed.\n", temp_val);
1944 }
1945 return 0;
1946}
1947
1948int qla8044_read_temperature(scsi_qla_host_t *vha)
1949{
1950 uint32_t temp;
1951
1952 temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
1953 return qla82xx_get_temp_val(temp);
1954}
1955
1956/**
1957 * qla8044_check_fw_alive - Check firmware health
1958 * @ha: Pointer to host adapter structure.
1959 *
1960 * Context: Interrupt
1961 **/
1962int
1963qla8044_check_fw_alive(struct scsi_qla_host *vha)
1964{
1965 uint32_t fw_heartbeat_counter;
1966 uint32_t halt_status1, halt_status2;
1967 int status = QLA_SUCCESS;
1968
1969 fw_heartbeat_counter = qla8044_rd_direct(vha,
1970 QLA8044_PEG_ALIVE_COUNTER_INDEX);
1971
1972 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
1973 if (fw_heartbeat_counter == 0xffffffff) {
1974 ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
1975 "scsi%ld: %s: Device in frozen "
1976 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
1977 vha->host_no, __func__);
1978 return status;
1979 }
1980
1981 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
1982 vha->seconds_since_last_heartbeat++;
1983 /* FW not alive after 2 seconds */
1984 if (vha->seconds_since_last_heartbeat == 2) {
1985 vha->seconds_since_last_heartbeat = 0;
1986 halt_status1 = qla8044_rd_direct(vha,
1987 QLA8044_PEG_HALT_STATUS1_INDEX);
1988 halt_status2 = qla8044_rd_direct(vha,
1989 QLA8044_PEG_HALT_STATUS2_INDEX);
1990
1991 ql_log(ql_log_info, vha, 0xb0d5,
1992 "scsi(%ld): %s, ISP8044 "
1993 "Dumping hw/fw registers:\n"
1994 " PEG_HALT_STATUS1: 0x%x, "
1995 "PEG_HALT_STATUS2: 0x%x,\n",
1996 vha->host_no, __func__, halt_status1,
1997 halt_status2);
1998 status = QLA_FUNCTION_FAILED;
1999 }
2000 } else
2001 vha->seconds_since_last_heartbeat = 0;
2002
2003 vha->fw_heartbeat_counter = fw_heartbeat_counter;
2004 return status;
2005}
2006
/* Periodic health check: examines temperature, IDC device state and the
 * firmware heartbeat, and sets DPC flags / wakes the DPC thread when
 * recovery is required. Runs from timer context - no state changes here,
 * only flag setting. */
void
qla8044_watchdog(struct scsi_qla_host *vha)
{
	uint32_t dev_state, halt_status;
	int halt_status_unrecoverable = 0;
	struct qla_hw_data *ha = vha->hw;

	/* don't poll if reset is going on or FW hang in quiescent state */
	if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) ||
	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);

		if (qla8044_check_temp(vha)) {
			/* Over-temperature shutdown: unrecoverable. */
			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
			ha->flags.isp82xx_fw_hung = 1;
			qla2xxx_wake_dpc(vha);
		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
			   !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
			ql_log(ql_log_info, vha, 0xb0d6,
			    "%s: HW State: NEED RESET!\n",
			    __func__);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
			   !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
			ql_log(ql_log_info, vha, 0xb0d7,
			    "%s: HW State: NEED QUIES detected!\n",
			    __func__);
			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		} else  {
			/* Check firmware health */
			if (qla8044_check_fw_alive(vha)) {
				halt_status = qla8044_rd_direct(vha,
					QLA8044_PEG_HALT_STATUS1_INDEX);
				if (halt_status &
				    QLA8044_HALT_STATUS_FW_RESET) {
					ql_log(ql_log_fatal, vha,
					    0xb0d8, "%s: Firmware "
					    "error detected device "
					    "is being reset\n",
					    __func__);
				} else if (halt_status &
					    QLA8044_HALT_STATUS_UNRECOVERABLE) {
						halt_status_unrecoverable = 1;
				}

				/* Since we cannot change dev_state in interrupt
				 * context, set appropriate DPC flag then wakeup
				 * DPC */
				if (halt_status_unrecoverable) {
					set_bit(ISP_UNRECOVERABLE,
					    &vha->dpc_flags);
				} else {
					if (dev_state ==
					    QLA8XXX_DEV_QUIESCENT) {
						set_bit(FCOE_CTX_RESET_NEEDED,
						    &vha->dpc_flags);
						ql_log(ql_log_info, vha, 0xb0d9,
						    "%s: FW CONTEXT Reset "
						    "needed!\n", __func__);
					} else {
						ql_log(ql_log_info, vha,
						    0xb0da, "%s: "
						    "detect abort needed\n",
						    __func__);
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						qla82xx_clear_pending_mbx(vha);
					}
				}
				ha->flags.isp82xx_fw_hung = 1;
				ql_log(ql_log_warn, vha, 0xb10a,
				    "Firmware hung.\n");
				qla2xxx_wake_dpc(vha);
			}
		}

	}
}
2089
/* Execute a CRB "control" minidump entry: an opcode bitmask drives a
 * small interpreter over crb_addr (write/read-write/and/or/poll and
 * saved-state load/store/modify ops), repeated op_count times with the
 * address advanced by addr_stride each iteration. Returns QLA_SUCCESS,
 * or QLA_FUNCTION_FAILED if a POLL op times out. */
static int
qla8044_minidump_process_control(struct scsi_qla_host *vha,
	struct qla8044_minidump_entry_hdr *entry_hdr)
{
	struct qla8044_minidump_entry_crb *crb_entry;
	uint32_t read_value, opcode, poll_time, addr, index;
	uint32_t crb_addr, rval = QLA_SUCCESS;
	unsigned long wtime;
	struct qla8044_minidump_template_hdr *tmplt_hdr;
	int i;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__);
	tmplt_hdr = (struct qla8044_minidump_template_hdr *)
		ha->md_tmplt_hdr;
	crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr;

	crb_addr = crb_entry->addr;
	for (i = 0; i < crb_entry->op_count; i++) {
		opcode = crb_entry->crb_ctrl.opcode;

		/* Each handled opcode bit is cleared from the local copy. */
		if (opcode & QLA82XX_DBG_OPCODE_WR) {
			qla8044_wr_reg_indirect(vha, crb_addr,
			    crb_entry->value_1);
			opcode &= ~QLA82XX_DBG_OPCODE_WR;
		}

		if (opcode & QLA82XX_DBG_OPCODE_RW) {
			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
			opcode &= ~QLA82XX_DBG_OPCODE_RW;
		}

		if (opcode & QLA82XX_DBG_OPCODE_AND) {
			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
			read_value &= crb_entry->value_2;
			opcode &= ~QLA82XX_DBG_OPCODE_AND;
			/* Combined AND+OR is applied in one write. */
			if (opcode & QLA82XX_DBG_OPCODE_OR) {
				read_value |= crb_entry->value_3;
				opcode &= ~QLA82XX_DBG_OPCODE_OR;
			}
			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
		}
		if (opcode & QLA82XX_DBG_OPCODE_OR) {
			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
			read_value |= crb_entry->value_3;
			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
			opcode &= ~QLA82XX_DBG_OPCODE_OR;
		}
		if (opcode & QLA82XX_DBG_OPCODE_POLL) {
			/* Poll until (value & mask) matches, or timeout. */
			poll_time = crb_entry->crb_strd.poll_timeout;
			wtime = jiffies + poll_time;
			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);

			do {
				if ((read_value & crb_entry->value_2) ==
				    crb_entry->value_1) {
					break;
				} else if (time_after_eq(jiffies, wtime)) {
					/* capturing dump failed */
					rval = QLA_FUNCTION_FAILED;
					break;
				} else {
					qla8044_rd_reg_indirect(vha,
					    crb_addr, &read_value);
				}
			} while (1);
			opcode &= ~QLA82XX_DBG_OPCODE_POLL;
		}

		if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
			/* Read a register into the template's saved-state
			 * array; source address may itself come from
			 * saved state. */
			if (crb_entry->crb_strd.state_index_a) {
				index = crb_entry->crb_strd.state_index_a;
				addr = tmplt_hdr->saved_state_array[index];
			} else {
				addr = crb_addr;
			}

			qla8044_rd_reg_indirect(vha, addr, &read_value);
			index = crb_entry->crb_ctrl.state_index_v;
			tmplt_hdr->saved_state_array[index] = read_value;
			opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
		}

		if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
			/* Write either a saved-state value or the entry's
			 * immediate value_1 to a register. */
			if (crb_entry->crb_strd.state_index_a) {
				index = crb_entry->crb_strd.state_index_a;
				addr = tmplt_hdr->saved_state_array[index];
			} else {
				addr = crb_addr;
			}

			if (crb_entry->crb_ctrl.state_index_v) {
				index = crb_entry->crb_ctrl.state_index_v;
				read_value =
				    tmplt_hdr->saved_state_array[index];
			} else {
				read_value = crb_entry->value_1;
			}

			qla8044_wr_reg_indirect(vha, addr, read_value);
			opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
		}

		if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
			/* Modify a saved-state value in place:
			 * shift, mask, or-in, add. */
			index = crb_entry->crb_ctrl.state_index_v;
			read_value = tmplt_hdr->saved_state_array[index];
			read_value <<= crb_entry->crb_ctrl.shl;
			read_value >>= crb_entry->crb_ctrl.shr;
			if (crb_entry->value_2)
				read_value &= crb_entry->value_2;
			read_value |= crb_entry->value_3;
			read_value += crb_entry->value_1;
			tmplt_hdr->saved_state_array[index] = read_value;
			opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
		}
		crb_addr += crb_entry->crb_strd.addr_stride;
	}
	return rval;
}
2210
2211static void
2212qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
2213 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2214{
2215 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2216 struct qla8044_minidump_entry_crb *crb_hdr;
2217 uint32_t *data_ptr = *d_ptr;
2218
2219 ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
2220 crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
2221 r_addr = crb_hdr->addr;
2222 r_stride = crb_hdr->crb_strd.addr_stride;
2223 loop_cnt = crb_hdr->op_count;
2224
2225 for (i = 0; i < loop_cnt; i++) {
2226 qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2227 *data_ptr++ = r_addr;
2228 *data_ptr++ = r_value;
2229 r_addr += r_stride;
2230 }
2231 *d_ptr = data_ptr;
2232}
2233
/* Capture a region of adapter memory through the MIU test agent,
 * 16 bytes per agent cycle, into the dump buffer. Address and size must
 * be 16-byte aligned/multiples. Performed under ha->hw_lock since the
 * agent registers are shared state. */
static int
qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
{
	uint32_t r_addr, r_value, r_data;
	uint32_t i, j, loop_cnt;
	struct qla8044_minidump_entry_rdmem *m_hdr;
	unsigned long flags;
	uint32_t *data_ptr = *d_ptr;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
	m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
	r_addr = m_hdr->read_addr;
	loop_cnt = m_hdr->read_data_size/16;	/* agent reads 16 bytes/cycle */

	ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
	    "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
	    __func__, r_addr, m_hdr->read_data_size);

	if (r_addr & 0xf) {
		ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
		    "[%s]: Read addr 0x%x not 16 bytes alligned\n",
		    __func__, r_addr);
		return QLA_FUNCTION_FAILED;
	}

	if (m_hdr->read_data_size % 16) {
		ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
		    "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
		    __func__, m_hdr->read_data_size);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
	    "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
	    __func__, r_addr, m_hdr->read_data_size, loop_cnt);

	write_lock_irqsave(&ha->hw_lock, flags);
	for (i = 0; i < loop_cnt; i++) {
		/* Program the agent with the source address, then kick
		 * off a read cycle. */
		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
		r_value = 0;
		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
		r_value = MIU_TA_CTL_ENABLE;
		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
		r_value = MIU_TA_CTL_START_ENABLE;
		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);

		/* Busy-wait for the agent to finish this cycle. */
		for (j = 0; j < MAX_CTL_CHECK; j++) {
			qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
			    &r_value);
			if ((r_value & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			/* NOTE(review): agent timed out but QLA_SUCCESS is
			 * returned, so the caller cannot tell the capture
			 * is incomplete - presumably this should be
			 * QLA_FUNCTION_FAILED; TODO confirm. */
			printk_ratelimited(KERN_ERR
			    "%s: failed to read through agent\n", __func__);
			write_unlock_irqrestore(&ha->hw_lock, flags);
			return QLA_SUCCESS;
		}

		/* Drain the four 32-bit data registers into the dump. */
		for (j = 0; j < 4; j++) {
			qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
			    &r_data);
			*data_ptr++ = r_data;
		}

		r_addr += 16;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);

	ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
	    "Leaving fn: %s datacount: 0x%x\n",
	    __func__, (loop_cnt * 16));

	*d_ptr = data_ptr;
	return QLA_SUCCESS;
}
2313
2314/* ISP83xx flash read for _RDROM _BOARD */
2315static uint32_t
2316qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
2317 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2318{
2319 uint32_t fl_addr, u32_count, rval;
2320 struct qla8044_minidump_entry_rdrom *rom_hdr;
2321 uint32_t *data_ptr = *d_ptr;
2322
2323 rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
2324 fl_addr = rom_hdr->read_addr;
2325 u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
2326
2327 ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
2328 __func__, fl_addr, u32_count);
2329
2330 rval = qla8044_lockless_flash_read_u32(vha, fl_addr,
2331 (u8 *)(data_ptr), u32_count);
2332
2333 if (rval != QLA_SUCCESS) {
2334 ql_log(ql_log_fatal, vha, 0xb0f6,
2335 "%s: Flash Read Error,Count=%d\n", __func__, u32_count);
2336 return QLA_FUNCTION_FAILED;
2337 } else {
2338 data_ptr += u32_count;
2339 *d_ptr = data_ptr;
2340 return QLA_SUCCESS;
2341 }
2342}
2343
/* Mark a minidump template entry as not captured: set the skipped flag
 * in its driver_flags and log the entry's type and capture level.
 */
static void
qla8044_mark_entry_skipped(struct scsi_qla_host *vha,
	struct qla8044_minidump_entry_hdr *entry_hdr, int index)
{
	entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;

	ql_log(ql_log_info, vha, 0xb0f7,
	    "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
	    vha->host_no, index, entry_hdr->entry_type,
	    entry_hdr->d_ctrl.entry_capture_mask);
}
2355
/* Capture an L2 tag/data minidump entry: for each tag value, select the
 * line via the tag register, optionally kick the control register, wait
 * (bounded) for the poll mask to clear, then read out r_cnt words.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED on poll timeout — in the
 * failure case *d_ptr is NOT advanced, even though some words may
 * already have been written to the buffer.
 */
static int
qla8044_minidump_process_l2tag(struct scsi_qla_host *vha,
	struct qla8044_minidump_entry_hdr *entry_hdr,
	uint32_t **d_ptr)
{
	uint32_t addr, r_addr, c_addr, t_r_addr;
	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
	unsigned long p_wait, w_time, p_mask;
	uint32_t c_value_w, c_value_r;
	struct qla8044_minidump_entry_cache *cache_hdr;
	int rval = QLA_FUNCTION_FAILED;
	uint32_t *data_ptr = *d_ptr;

	ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__);
	cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;

	loop_count = cache_hdr->op_count;
	r_addr = cache_hdr->read_addr;
	c_addr = cache_hdr->control_addr;
	c_value_w = cache_hdr->cache_ctrl.write_value;

	t_r_addr = cache_hdr->tag_reg_addr;
	t_value = cache_hdr->addr_ctrl.init_tag_value;
	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
	/* p_wait is in jiffies — it bounds the busy-poll below. */
	p_wait = cache_hdr->cache_ctrl.poll_wait;
	p_mask = cache_hdr->cache_ctrl.poll_mask;

	for (i = 0; i < loop_count; i++) {
		/* Select the line for this tag value. */
		qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
		if (c_value_w)
			qla8044_wr_reg_indirect(vha, c_addr, c_value_w);

		/* Busy-poll the control register until the bits in
		 * p_mask clear, giving up after p_wait jiffies.
		 */
		if (p_mask) {
			w_time = jiffies + p_wait;
			do {
				qla8044_rd_reg_indirect(vha, c_addr,
				    &c_value_r);
				if ((c_value_r & p_mask) == 0) {
					break;
				} else if (time_after_eq(jiffies, w_time)) {
					/* capturing dump failed */
					return rval;
				}
			} while (1);
		}

		/* Read out the selected line, one stride at a time. */
		addr = r_addr;
		for (k = 0; k < r_cnt; k++) {
			qla8044_rd_reg_indirect(vha, addr, &r_value);
			*data_ptr++ = r_value;
			addr += cache_hdr->read_ctrl.read_addr_stride;
		}
		t_value += cache_hdr->addr_ctrl.tag_value_stride;
	}
	/* Publish the advanced cursor only after full success. */
	*d_ptr = data_ptr;
	return QLA_SUCCESS;
}
2413
2414static void
2415qla8044_minidump_process_l1cache(struct scsi_qla_host *vha,
2416 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2417{
2418 uint32_t addr, r_addr, c_addr, t_r_addr;
2419 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2420 uint32_t c_value_w;
2421 struct qla8044_minidump_entry_cache *cache_hdr;
2422 uint32_t *data_ptr = *d_ptr;
2423
2424 cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
2425 loop_count = cache_hdr->op_count;
2426 r_addr = cache_hdr->read_addr;
2427 c_addr = cache_hdr->control_addr;
2428 c_value_w = cache_hdr->cache_ctrl.write_value;
2429
2430 t_r_addr = cache_hdr->tag_reg_addr;
2431 t_value = cache_hdr->addr_ctrl.init_tag_value;
2432 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2433
2434 for (i = 0; i < loop_count; i++) {
2435 qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
2436 qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
2437 addr = r_addr;
2438 for (k = 0; k < r_cnt; k++) {
2439 qla8044_rd_reg_indirect(vha, addr, &r_value);
2440 *data_ptr++ = r_value;
2441 addr += cache_hdr->read_ctrl.read_addr_stride;
2442 }
2443 t_value += cache_hdr->addr_ctrl.tag_value_stride;
2444 }
2445 *d_ptr = data_ptr;
2446}
2447
2448static void
2449qla8044_minidump_process_rdocm(struct scsi_qla_host *vha,
2450 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2451{
2452 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2453 struct qla8044_minidump_entry_rdocm *ocm_hdr;
2454 uint32_t *data_ptr = *d_ptr;
2455 struct qla_hw_data *ha = vha->hw;
2456
2457 ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__);
2458
2459 ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr;
2460 r_addr = ocm_hdr->read_addr;
2461 r_stride = ocm_hdr->read_addr_stride;
2462 loop_cnt = ocm_hdr->op_count;
2463
2464 ql_dbg(ql_dbg_p3p, vha, 0xb0fa,
2465 "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
2466 __func__, r_addr, r_stride, loop_cnt);
2467
2468 for (i = 0; i < loop_cnt; i++) {
2469 r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
2470 *data_ptr++ = r_value;
2471 r_addr += r_stride;
2472 }
2473 ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n",
2474 __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t)));
2475
2476 *d_ptr = data_ptr;
2477}
2478
2479static void
2480qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
2481 struct qla8044_minidump_entry_hdr *entry_hdr,
2482 uint32_t **d_ptr)
2483{
2484 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
2485 struct qla8044_minidump_entry_mux *mux_hdr;
2486 uint32_t *data_ptr = *d_ptr;
2487
2488 ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__);
2489
2490 mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr;
2491 r_addr = mux_hdr->read_addr;
2492 s_addr = mux_hdr->select_addr;
2493 s_stride = mux_hdr->select_value_stride;
2494 s_value = mux_hdr->select_value;
2495 loop_cnt = mux_hdr->op_count;
2496
2497 for (i = 0; i < loop_cnt; i++) {
2498 qla8044_wr_reg_indirect(vha, s_addr, s_value);
2499 qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2500 *data_ptr++ = s_value;
2501 *data_ptr++ = r_value;
2502 s_value += s_stride;
2503 }
2504 *d_ptr = data_ptr;
2505}
2506
2507static void
2508qla8044_minidump_process_queue(struct scsi_qla_host *vha,
2509 struct qla8044_minidump_entry_hdr *entry_hdr,
2510 uint32_t **d_ptr)
2511{
2512 uint32_t s_addr, r_addr;
2513 uint32_t r_stride, r_value, r_cnt, qid = 0;
2514 uint32_t i, k, loop_cnt;
2515 struct qla8044_minidump_entry_queue *q_hdr;
2516 uint32_t *data_ptr = *d_ptr;
2517
2518 ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__);
2519 q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr;
2520 s_addr = q_hdr->select_addr;
2521 r_cnt = q_hdr->rd_strd.read_addr_cnt;
2522 r_stride = q_hdr->rd_strd.read_addr_stride;
2523 loop_cnt = q_hdr->op_count;
2524
2525 for (i = 0; i < loop_cnt; i++) {
2526 qla8044_wr_reg_indirect(vha, s_addr, qid);
2527 r_addr = q_hdr->read_addr;
2528 for (k = 0; k < r_cnt; k++) {
2529 qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2530 *data_ptr++ = r_value;
2531 r_addr += r_stride;
2532 }
2533 qid += q_hdr->q_strd.queue_id_stride;
2534 }
2535 *d_ptr = data_ptr;
2536}
2537
2538/* ISP83xx functions to process new minidump entries... */
2539static uint32_t
2540qla8044_minidump_process_pollrd(struct scsi_qla_host *vha,
2541 struct qla8044_minidump_entry_hdr *entry_hdr,
2542 uint32_t **d_ptr)
2543{
2544 uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
2545 uint16_t s_stride, i;
2546 struct qla8044_minidump_entry_pollrd *pollrd_hdr;
2547 uint32_t *data_ptr = *d_ptr;
2548
2549 pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr;
2550 s_addr = pollrd_hdr->select_addr;
2551 r_addr = pollrd_hdr->read_addr;
2552 s_value = pollrd_hdr->select_value;
2553 s_stride = pollrd_hdr->select_value_stride;
2554
2555 poll_wait = pollrd_hdr->poll_wait;
2556 poll_mask = pollrd_hdr->poll_mask;
2557
2558 for (i = 0; i < pollrd_hdr->op_count; i++) {
2559 qla8044_wr_reg_indirect(vha, s_addr, s_value);
2560 poll_wait = pollrd_hdr->poll_wait;
2561 while (1) {
2562 qla8044_rd_reg_indirect(vha, s_addr, &r_value);
2563 if ((r_value & poll_mask) != 0) {
2564 break;
2565 } else {
2566 usleep_range(1000, 1100);
2567 if (--poll_wait == 0) {
2568 ql_log(ql_log_fatal, vha, 0xb0fe,
2569 "%s: TIMEOUT\n", __func__);
2570 goto error;
2571 }
2572 }
2573 }
2574 qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2575 *data_ptr++ = s_value;
2576 *data_ptr++ = r_value;
2577
2578 s_value += s_stride;
2579 }
2580 *d_ptr = data_ptr;
2581 return QLA_SUCCESS;
2582
2583error:
2584 return QLA_FUNCTION_FAILED;
2585}
2586
2587static void
2588qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha,
2589 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2590{
2591 uint32_t sel_val1, sel_val2, t_sel_val, data, i;
2592 uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
2593 struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr;
2594 uint32_t *data_ptr = *d_ptr;
2595
2596 rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr;
2597 sel_val1 = rdmux2_hdr->select_value_1;
2598 sel_val2 = rdmux2_hdr->select_value_2;
2599 sel_addr1 = rdmux2_hdr->select_addr_1;
2600 sel_addr2 = rdmux2_hdr->select_addr_2;
2601 sel_val_mask = rdmux2_hdr->select_value_mask;
2602 read_addr = rdmux2_hdr->read_addr;
2603
2604 for (i = 0; i < rdmux2_hdr->op_count; i++) {
2605 qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1);
2606 t_sel_val = sel_val1 & sel_val_mask;
2607 *data_ptr++ = t_sel_val;
2608
2609 qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
2610 qla8044_rd_reg_indirect(vha, read_addr, &data);
2611
2612 *data_ptr++ = data;
2613
2614 qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2);
2615 t_sel_val = sel_val2 & sel_val_mask;
2616 *data_ptr++ = t_sel_val;
2617
2618 qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
2619 qla8044_rd_reg_indirect(vha, read_addr, &data);
2620
2621 *data_ptr++ = data;
2622
2623 sel_val1 += rdmux2_hdr->select_value_stride;
2624 sel_val2 += rdmux2_hdr->select_value_stride;
2625 }
2626
2627 *d_ptr = data_ptr;
2628}
2629
2630static uint32_t
2631qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha,
2632 struct qla8044_minidump_entry_hdr *entry_hdr,
2633 uint32_t **d_ptr)
2634{
2635 uint32_t poll_wait, poll_mask, r_value, data;
2636 uint32_t addr_1, addr_2, value_1, value_2;
2637 struct qla8044_minidump_entry_pollrdmwr *poll_hdr;
2638 uint32_t *data_ptr = *d_ptr;
2639
2640 poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr;
2641 addr_1 = poll_hdr->addr_1;
2642 addr_2 = poll_hdr->addr_2;
2643 value_1 = poll_hdr->value_1;
2644 value_2 = poll_hdr->value_2;
2645 poll_mask = poll_hdr->poll_mask;
2646
2647 qla8044_wr_reg_indirect(vha, addr_1, value_1);
2648
2649 poll_wait = poll_hdr->poll_wait;
2650 while (1) {
2651 qla8044_rd_reg_indirect(vha, addr_1, &r_value);
2652
2653 if ((r_value & poll_mask) != 0) {
2654 break;
2655 } else {
2656 usleep_range(1000, 1100);
2657 if (--poll_wait == 0) {
2658 ql_log(ql_log_fatal, vha, 0xb0ff,
2659 "%s: TIMEOUT\n", __func__);
2660 goto error;
2661 }
2662 }
2663 }
2664
2665 qla8044_rd_reg_indirect(vha, addr_2, &data);
2666 data &= poll_hdr->modify_mask;
2667 qla8044_wr_reg_indirect(vha, addr_2, data);
2668 qla8044_wr_reg_indirect(vha, addr_1, value_2);
2669
2670 poll_wait = poll_hdr->poll_wait;
2671 while (1) {
2672 qla8044_rd_reg_indirect(vha, addr_1, &r_value);
2673
2674 if ((r_value & poll_mask) != 0) {
2675 break;
2676 } else {
2677 usleep_range(1000, 1100);
2678 if (--poll_wait == 0) {
2679 ql_log(ql_log_fatal, vha, 0xb100,
2680 "%s: TIMEOUT2\n", __func__);
2681 goto error;
2682 }
2683 }
2684 }
2685
2686 *data_ptr++ = addr_2;
2687 *data_ptr++ = data;
2688
2689 *d_ptr = data_ptr;
2690
2691 return QLA_SUCCESS;
2692
2693error:
2694 return QLA_FUNCTION_FAILED;
2695}
2696
/* Pex-dma engine addressing: each engine's registers sit at
 * ISP8044_PEX_DMA_BASE_ADDRESS + engine_num * ISP8044_PEX_DMA_NUM_OFFSET,
 * where engine_num comes from the template's saved_state_array at
 * ISP8044_PEX_DMA_ENGINE_INDEX.  The three CMD_* values are register
 * offsets within that per-engine window.
 */
#define ISP8044_PEX_DMA_ENGINE_INDEX		8
#define ISP8044_PEX_DMA_BASE_ADDRESS		0x77320000
#define ISP8044_PEX_DMA_NUM_OFFSET		0x10000
#define ISP8044_PEX_DMA_CMD_ADDR_LOW		0x0
#define ISP8044_PEX_DMA_CMD_ADDR_HIGH		0x04
#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL	0x08

/* Chunk size per DMA and poll budget (10 us per iteration). */
#define ISP8044_PEX_DMA_READ_SIZE	(16 * 1024)
#define ISP8044_PEX_DMA_MAX_WAIT	(100 * 100) /* Max wait of 100 msecs */
2706
2707static int
2708qla8044_check_dma_engine_state(struct scsi_qla_host *vha)
2709{
2710 struct qla_hw_data *ha = vha->hw;
2711 int rval = QLA_SUCCESS;
2712 uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
2713 uint64_t dma_base_addr = 0;
2714 struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
2715
2716 tmplt_hdr = ha->md_tmplt_hdr;
2717 dma_eng_num =
2718 tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
2719 dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
2720 (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
2721
2722 /* Read the pex-dma's command-status-and-control register. */
2723 rval = qla8044_rd_reg_indirect(vha,
2724 (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
2725 &cmd_sts_and_cntrl);
2726 if (rval)
2727 return QLA_FUNCTION_FAILED;
2728
2729 /* Check if requested pex-dma engine is available. */
2730 if (cmd_sts_and_cntrl & BIT_31)
2731 return QLA_SUCCESS;
2732
2733 return QLA_FUNCTION_FAILED;
2734}
2735
2736static int
2737qla8044_start_pex_dma(struct scsi_qla_host *vha,
2738 struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr)
2739{
2740 struct qla_hw_data *ha = vha->hw;
2741 int rval = QLA_SUCCESS, wait = 0;
2742 uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
2743 uint64_t dma_base_addr = 0;
2744 struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
2745
2746 tmplt_hdr = ha->md_tmplt_hdr;
2747 dma_eng_num =
2748 tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
2749 dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
2750 (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
2751
2752 rval = qla8044_wr_reg_indirect(vha,
2753 dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW,
2754 m_hdr->desc_card_addr);
2755 if (rval)
2756 goto error_exit;
2757
2758 rval = qla8044_wr_reg_indirect(vha,
2759 dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0);
2760 if (rval)
2761 goto error_exit;
2762
2763 rval = qla8044_wr_reg_indirect(vha,
2764 dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL,
2765 m_hdr->start_dma_cmd);
2766 if (rval)
2767 goto error_exit;
2768
2769 /* Wait for dma operation to complete. */
2770 for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) {
2771 rval = qla8044_rd_reg_indirect(vha,
2772 (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
2773 &cmd_sts_and_cntrl);
2774 if (rval)
2775 goto error_exit;
2776
2777 if ((cmd_sts_and_cntrl & BIT_1) == 0)
2778 break;
2779
2780 udelay(10);
2781 }
2782
2783 /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */
2784 if (wait >= ISP8044_PEX_DMA_MAX_WAIT) {
2785 rval = QLA_FUNCTION_FAILED;
2786 goto error_exit;
2787 }
2788
2789error_exit:
2790 return rval;
2791}
2792
/* Capture an RDMEM minidump entry using the pex-dma engine instead of
 * word-by-word indirect reads.  Returns QLA_FUNCTION_FAILED when the
 * engine is unavailable or any step fails, so the caller can fall back
 * to the slower qla8044_minidump_process_rdmem() path.
 */
static int
qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_SUCCESS;
	struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
	uint32_t chunk_size, read_size;
	uint8_t *data_ptr = (uint8_t *)*d_ptr;
	void *rdmem_buffer = NULL;
	dma_addr_t rdmem_dma;
	struct qla8044_pex_dma_descriptor dma_desc;

	rval = qla8044_check_dma_engine_state(vha);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb147,
		    "DMA engine not available. Fallback to rdmem-read.\n");
		return QLA_FUNCTION_FAILED;
	}

	m_hdr = (void *)entry_hdr;

	/* Coherent bounce buffer the engine DMAs each chunk into. */
	rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
	    ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL);
	if (!rdmem_buffer) {
		ql_dbg(ql_dbg_p3p, vha, 0xb148,
		    "Unable to allocate rdmem dma buffer\n");
		return QLA_FUNCTION_FAILED;
	}

	/* Prepare pex-dma descriptor to be written to MS memory. */
	/* dma-desc-cmd layout:
	 *		0-3: dma-desc-cmd 0-3
	 *		4-7: pcid function number
	 *		8-15: dma-desc-cmd 8-15
	 * dma_bus_addr: dma buffer address
	 * cmd.read_data_size: amount of data-chunk to be read.
	 */
	dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
	dma_desc.cmd.dma_desc_cmd |=
	    ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);

	dma_desc.dma_bus_addr = rdmem_dma;
	dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE;
	read_size = 0;

	/*
	 * Perform rdmem operation using pex-dma.
	 * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE.
	 */
	while (read_size < m_hdr->read_data_size) {
		/* Trim the final chunk to the remaining byte count. */
		if (m_hdr->read_data_size - read_size <
		    ISP8044_PEX_DMA_READ_SIZE) {
			chunk_size = (m_hdr->read_data_size - read_size);
			dma_desc.cmd.read_data_size = chunk_size;
		}

		dma_desc.src_addr = m_hdr->read_addr + read_size;

		/* Prepare: Write pex-dma descriptor to MS memory. */
		rval = qla8044_ms_mem_write_128b(vha,
		    m_hdr->desc_card_addr, (void *)&dma_desc,
		    (sizeof(struct qla8044_pex_dma_descriptor)/16));
		if (rval) {
			ql_log(ql_log_warn, vha, 0xb14a,
			    "%s: Error writing rdmem-dma-init to MS !!!\n",
			    __func__);
			goto error_exit;
		}
		ql_dbg(ql_dbg_p3p, vha, 0xb14b,
		    "%s: Dma-descriptor: Instruct for rdmem dma "
		    "(chunk_size 0x%x).\n", __func__, chunk_size);

		/* Execute: Start pex-dma operation. */
		rval = qla8044_start_pex_dma(vha, m_hdr);
		if (rval)
			goto error_exit;

		/* Copy the chunk out of the bounce buffer into the dump. */
		memcpy(data_ptr, rdmem_buffer, chunk_size);
		data_ptr += chunk_size;
		read_size += chunk_size;
	}

	/* Advance the caller's cursor only after the whole entry read. */
	*d_ptr = (void *)data_ptr;

error_exit:
	if (rdmem_buffer)
		dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE,
		    rdmem_buffer, rdmem_dma);

	return rval;
}
2885
/*
 * qla8044_collect_md_data - Retrieve firmware minidump data.
 * @vha: pointer to the scsi_qla_host structure
 *
 * Validates the minidump template, then walks its entry headers,
 * dispatching each entry type to its capture handler and advancing
 * data_ptr through ha->md_dump.  On full success ha->fw_dumped is set
 * and a FW_DUMP uevent is posted.  Returns rval (QLA_FUNCTION_FAILED
 * on any failed/aborted capture).
 */
int
qla8044_collect_md_data(struct scsi_qla_host *vha)
{
	int num_entry_hdr = 0;
	struct qla8044_minidump_entry_hdr *entry_hdr;
	struct qla8044_minidump_template_hdr *tmplt_hdr;
	uint32_t *data_ptr;
	uint32_t data_collected = 0, f_capture_mask;
	int i, rval = QLA_FUNCTION_FAILED;
	uint64_t now;
	uint32_t timestamp, idc_control;
	struct qla_hw_data *ha = vha->hw;

	if (!ha->md_dump) {
		ql_log(ql_log_info, vha, 0xb101,
		    "%s(%ld) No buffer to dump\n",
		    __func__, vha->host_no);
		return rval;
	}

	/* Only one dump is kept; a prior dump must be consumed first. */
	if (ha->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xb10d,
		    "Firmware has been previously dumped (%p) "
		    "-- ignoring request.\n", ha->fw_dump);
		goto md_failed;
	}

	ha->fw_dumped = 0;

	if (!ha->md_tmplt_hdr || !ha->md_dump) {
		ql_log(ql_log_warn, vha, 0xb10e,
		    "Memory not allocated for minidump capture\n");
		goto md_failed;
	}

	/* Skip capture when an application forced the reset (IDC bit). */
	qla8044_idc_lock(ha);
	idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
	if (idc_control & GRACEFUL_RESET_BIT1) {
		ql_log(ql_log_warn, vha, 0xb112,
		    "Forced reset from application, "
		    "ignore minidump capture\n");
		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
		    (idc_control & ~GRACEFUL_RESET_BIT1));
		qla8044_idc_unlock(ha);

		goto md_failed;
	}
	qla8044_idc_unlock(ha);

	if (qla82xx_validate_template_chksum(vha)) {
		ql_log(ql_log_info, vha, 0xb109,
		    "Template checksum validation error\n");
		goto md_failed;
	}

	tmplt_hdr = (struct qla8044_minidump_template_hdr *)
		ha->md_tmplt_hdr;
	data_ptr = (uint32_t *)((uint8_t *)ha->md_dump);
	num_entry_hdr = tmplt_hdr->num_of_entries;

	ql_dbg(ql_dbg_p3p, vha, 0xb11a,
	    "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);

	f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;

	/* Validate whether required debug level is set */
	if ((f_capture_mask & 0x3) != 0x3) {
		ql_log(ql_log_warn, vha, 0xb10f,
		    "Minimum required capture mask[0x%x] level not set\n",
		    f_capture_mask);

	}
	tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
	ql_log(ql_log_info, vha, 0xb102,
	    "[%s]: starting data ptr: %p\n",
	    __func__, data_ptr);
	ql_log(ql_log_info, vha, 0xb10b,
	    "[%s]: no of entry headers in Template: 0x%x\n",
	    __func__, num_entry_hdr);
	ql_log(ql_log_info, vha, 0xb10c,
	    "[%s]: Total_data_size 0x%x, %d obtained\n",
	    __func__, ha->md_dump_size, ha->md_dump_size);

	/* Update current timestamp before taking dump */
	now = get_jiffies_64();
	timestamp = (u32)(jiffies_to_msecs(now) / 1000);
	tmplt_hdr->driver_timestamp = timestamp;

	entry_hdr = (struct qla8044_minidump_entry_hdr *)
		(((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
	tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] =
	    tmplt_hdr->ocm_window_reg[ha->portnum];

	/* Walk through the entry headers - validate/perform required action */
	for (i = 0; i < num_entry_hdr; i++) {
		/* Bail out if a handler overran the dump buffer. */
		if (data_collected > ha->md_dump_size) {
			ql_log(ql_log_info, vha, 0xb103,
			    "Data collected: [0x%x], "
			    "Total Dump size: [0x%x]\n",
			    data_collected, ha->md_dump_size);
			return rval;
		}

		/* Skip entries not selected by the driver capture mask. */
		if (!(entry_hdr->d_ctrl.entry_capture_mask &
		    ql2xmdcapmask)) {
			entry_hdr->d_ctrl.driver_flags |=
			    QLA82XX_DBG_SKIPPED_FLAG;
			goto skip_nxt_entry;
		}

		ql_dbg(ql_dbg_p3p, vha, 0xb104,
		    "Data collected: [0x%x], Dump size left:[0x%x]\n",
		    data_collected,
		    (ha->md_dump_size - data_collected));

		/* Decode the entry type and take required action to capture
		 * debug data
		 */
		switch (entry_hdr->entry_type) {
		case QLA82XX_RDEND:
			qla8044_mark_entry_skipped(vha, entry_hdr, i);
			break;
		case QLA82XX_CNTRL:
			rval = qla8044_minidump_process_control(vha,
			    entry_hdr);
			if (rval != QLA_SUCCESS) {
				qla8044_mark_entry_skipped(vha, entry_hdr, i);
				goto md_failed;
			}
			break;
		case QLA82XX_RDCRB:
			qla8044_minidump_process_rdcrb(vha,
			    entry_hdr, &data_ptr);
			break;
		case QLA82XX_RDMEM:
			/* Try the fast pex-dma path first, then fall back
			 * to word-by-word rdmem reads.
			 */
			rval = qla8044_minidump_pex_dma_read(vha,
			    entry_hdr, &data_ptr);
			if (rval != QLA_SUCCESS) {
				rval = qla8044_minidump_process_rdmem(vha,
				    entry_hdr, &data_ptr);
				if (rval != QLA_SUCCESS) {
					qla8044_mark_entry_skipped(vha,
					    entry_hdr, i);
					goto md_failed;
				}
			}
			break;
		case QLA82XX_BOARD:
		case QLA82XX_RDROM:
			rval = qla8044_minidump_process_rdrom(vha,
			    entry_hdr, &data_ptr);
			if (rval != QLA_SUCCESS) {
				qla8044_mark_entry_skipped(vha,
				    entry_hdr, i);
			}
			break;
		case QLA82XX_L2DTG:
		case QLA82XX_L2ITG:
		case QLA82XX_L2DAT:
		case QLA82XX_L2INS:
			rval = qla8044_minidump_process_l2tag(vha,
			    entry_hdr, &data_ptr);
			if (rval != QLA_SUCCESS) {
				qla8044_mark_entry_skipped(vha, entry_hdr, i);
				goto md_failed;
			}
			break;
		case QLA8044_L1DTG:
		case QLA8044_L1ITG:
		case QLA82XX_L1DAT:
		case QLA82XX_L1INS:
			qla8044_minidump_process_l1cache(vha,
			    entry_hdr, &data_ptr);
			break;
		case QLA82XX_RDOCM:
			qla8044_minidump_process_rdocm(vha,
			    entry_hdr, &data_ptr);
			break;
		case QLA82XX_RDMUX:
			qla8044_minidump_process_rdmux(vha,
			    entry_hdr, &data_ptr);
			break;
		case QLA82XX_QUEUE:
			qla8044_minidump_process_queue(vha,
			    entry_hdr, &data_ptr);
			break;
		case QLA8044_POLLRD:
			rval = qla8044_minidump_process_pollrd(vha,
			    entry_hdr, &data_ptr);
			if (rval != QLA_SUCCESS)
				qla8044_mark_entry_skipped(vha, entry_hdr, i);
			break;
		case QLA8044_RDMUX2:
			qla8044_minidump_process_rdmux2(vha,
			    entry_hdr, &data_ptr);
			break;
		case QLA8044_POLLRDMWR:
			rval = qla8044_minidump_process_pollrdmwr(vha,
			    entry_hdr, &data_ptr);
			if (rval != QLA_SUCCESS)
				qla8044_mark_entry_skipped(vha, entry_hdr, i);
			break;
		case QLA82XX_RDNOP:
		default:
			qla8044_mark_entry_skipped(vha, entry_hdr, i);
			break;
		}

		data_collected = (uint8_t *)data_ptr -
		    (uint8_t *)((uint8_t *)ha->md_dump);
skip_nxt_entry:
		/*
		 * next entry in the template
		 */
		entry_hdr = (struct qla8044_minidump_entry_hdr *)
		    (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
	}

	/* The template promises an exact dump size; mismatch is failure. */
	if (data_collected != ha->md_dump_size) {
		ql_log(ql_log_info, vha, 0xb105,
		    "Dump data mismatch: Data collected: "
		    "[0x%x], total_data_size:[0x%x]\n",
		    data_collected, ha->md_dump_size);
		goto md_failed;
	}

	ql_log(ql_log_info, vha, 0xb110,
	    "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
	    vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
	ha->fw_dumped = 1;
	qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);


	ql_log(ql_log_info, vha, 0xb106,
	    "Leaving fn: %s Last entry: 0x%x\n",
	    __func__, i);
md_failed:
	return rval;
}
3130
3131void
3132qla8044_get_minidump(struct scsi_qla_host *vha)
3133{
3134 struct qla_hw_data *ha = vha->hw;
3135
3136 if (!qla8044_collect_md_data(vha)) {
3137 ha->fw_dumped = 1;
3138 } else {
3139 ql_log(ql_log_fatal, vha, 0xb0db,
3140 "%s: Unable to collect minidump\n",
3141 __func__);
3142 }
3143}
3144
3145static int
3146qla8044_poll_flash_status_reg(struct scsi_qla_host *vha)
3147{
3148 uint32_t flash_status;
3149 int retries = QLA8044_FLASH_READ_RETRY_COUNT;
3150 int ret_val = QLA_SUCCESS;
3151
3152 while (retries--) {
3153 ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS,
3154 &flash_status);
3155 if (ret_val) {
3156 ql_log(ql_log_warn, vha, 0xb13c,
3157 "%s: Failed to read FLASH_STATUS reg.\n",
3158 __func__);
3159 break;
3160 }
3161 if ((flash_status & QLA8044_FLASH_STATUS_READY) ==
3162 QLA8044_FLASH_STATUS_READY)
3163 break;
3164 msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY);
3165 }
3166
3167 if (!retries)
3168 ret_val = QLA_FUNCTION_FAILED;
3169
3170 return ret_val;
3171}
3172
3173static int
3174qla8044_write_flash_status_reg(struct scsi_qla_host *vha,
3175 uint32_t data)
3176{
3177 int ret_val = QLA_SUCCESS;
3178 uint32_t cmd;
3179
3180 cmd = vha->hw->fdt_wrt_sts_reg_cmd;
3181
3182 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3183 QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd);
3184 if (ret_val) {
3185 ql_log(ql_log_warn, vha, 0xb125,
3186 "%s: Failed to write to FLASH_ADDR.\n", __func__);
3187 goto exit_func;
3188 }
3189
3190 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data);
3191 if (ret_val) {
3192 ql_log(ql_log_warn, vha, 0xb126,
3193 "%s: Failed to write to FLASH_WRDATA.\n", __func__);
3194 goto exit_func;
3195 }
3196
3197 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3198 QLA8044_FLASH_SECOND_ERASE_MS_VAL);
3199 if (ret_val) {
3200 ql_log(ql_log_warn, vha, 0xb127,
3201 "%s: Failed to write to FLASH_CONTROL.\n", __func__);
3202 goto exit_func;
3203 }
3204
3205 ret_val = qla8044_poll_flash_status_reg(vha);
3206 if (ret_val)
3207 ql_log(ql_log_warn, vha, 0xb128,
3208 "%s: Error polling flash status reg.\n", __func__);
3209
3210exit_func:
3211 return ret_val;
3212}
3213
3214/*
3215 * This function assumes that the flash lock is held.
3216 */
3217static int
3218qla8044_unprotect_flash(scsi_qla_host_t *vha)
3219{
3220 int ret_val;
3221 struct qla_hw_data *ha = vha->hw;
3222
3223 ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable);
3224 if (ret_val)
3225 ql_log(ql_log_warn, vha, 0xb139,
3226 "%s: Write flash status failed.\n", __func__);
3227
3228 return ret_val;
3229}
3230
3231/*
3232 * This function assumes that the flash lock is held.
3233 */
3234static int
3235qla8044_protect_flash(scsi_qla_host_t *vha)
3236{
3237 int ret_val;
3238 struct qla_hw_data *ha = vha->hw;
3239
3240 ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable);
3241 if (ret_val)
3242 ql_log(ql_log_warn, vha, 0xb13b,
3243 "%s: Write flash status failed.\n", __func__);
3244
3245 return ret_val;
3246}
3247
3248
/* Erase the flash sector that starts at sector_start_addr.
 *
 * NOTE(review): each step's failure is logged but ret_val is then
 * overwritten by the next call, so only the final status poll decides
 * the return value — presumably intentional best-effort sequencing;
 * confirm before relying on intermediate errors being reported.
 */
static int
qla8044_erase_flash_sector(struct scsi_qla_host *vha,
	uint32_t sector_start_addr)
{
	uint32_t reversed_addr;
	int ret_val = QLA_SUCCESS;

	/* Make sure the part is idle before issuing the erase. */
	ret_val = qla8044_poll_flash_status_reg(vha);
	if (ret_val) {
		ql_log(ql_log_warn, vha, 0xb12e,
		    "%s: Poll flash status after erase failed..\n", __func__);
	}

	/* The 24-bit sector address is sent with its bytes swapped. */
	reversed_addr = (((sector_start_addr & 0xFF) << 16) |
	    (sector_start_addr & 0xFF00) |
	    ((sector_start_addr & 0xFF0000) >> 16));

	ret_val = qla8044_wr_reg_indirect(vha,
	    QLA8044_FLASH_WRDATA, reversed_addr);
	if (ret_val) {
		ql_log(ql_log_warn, vha, 0xb12f,
		    "%s: Failed to write to FLASH_WRDATA.\n", __func__);
	}
	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
	    QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd);
	if (ret_val) {
		ql_log(ql_log_warn, vha, 0xb130,
		    "%s: Failed to write to FLASH_ADDR.\n", __func__);
	}
	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
	    QLA8044_FLASH_LAST_ERASE_MS_VAL);
	if (ret_val) {
		ql_log(ql_log_warn, vha, 0xb131,
		    "%s: Failed write to FLASH_CONTROL.\n", __func__);
	}
	/* Wait for the erase itself to complete. */
	ret_val = qla8044_poll_flash_status_reg(vha);
	if (ret_val) {
		ql_log(ql_log_warn, vha, 0xb132,
		    "%s: Poll flash status failed.\n", __func__);
	}


	return ret_val;
}
3293
3294/*
3295 * qla8044_flash_write_u32 - Write data to flash
3296 *
3297 * @ha : Pointer to adapter structure
3298 * addr : Flash address to write to
3299 * p_data : Data to be written
3300 *
3301 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
3302 *
3303 * NOTE: Lock should be held on entry
3304 */
3305static int
3306qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr,
3307 uint32_t *p_data)
3308{
3309 int ret_val = QLA_SUCCESS;
3310
3311 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3312 0x00800000 | (addr >> 2));
3313 if (ret_val) {
3314 ql_log(ql_log_warn, vha, 0xb134,
3315 "%s: Failed write to FLASH_ADDR.\n", __func__);
3316 goto exit_func;
3317 }
3318 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data);
3319 if (ret_val) {
3320 ql_log(ql_log_warn, vha, 0xb135,
3321 "%s: Failed write to FLASH_WRDATA.\n", __func__);
3322 goto exit_func;
3323 }
3324 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D);
3325 if (ret_val) {
3326 ql_log(ql_log_warn, vha, 0xb136,
3327 "%s: Failed write to FLASH_CONTROL.\n", __func__);
3328 goto exit_func;
3329 }
3330 ret_val = qla8044_poll_flash_status_reg(vha);
3331 if (ret_val) {
3332 ql_log(ql_log_warn, vha, 0xb137,
3333 "%s: Poll flash status failed.\n", __func__);
3334 }
3335
3336exit_func:
3337 return ret_val;
3338}
3339
/* Program `dwords` dwords to flash at faddr using the SPI burst
 * (buffer) mode: the first, middle, and last dwords each use a
 * distinct FLASH_CONTROL pattern, and completion is polled between
 * steps.  dwords must be within
 * [QLA8044_MIN_OPTROM_BURST_DWORDS, QLA8044_MAX_OPTROM_BURST_DWORDS].
 */
static int
qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
	uint32_t faddr, uint32_t dwords)
{
	int ret = QLA_FUNCTION_FAILED;
	uint32_t spi_val;

	if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS ||
	    dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) {
		ql_dbg(ql_dbg_user, vha, 0xb123,
		    "Got unsupported dwords = 0x%x.\n",
		    dwords);
		return QLA_FUNCTION_FAILED;
	}

	/* Enable SPI control for the burst in FLASH_SPI_CONTROL. */
	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
	    spi_val | QLA8044_FLASH_SPI_CTL);
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
	    QLA8044_FLASH_FIRST_TEMP_VAL);

	/* First DWORD write to FLASH_WRDATA */
	ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
	    *dwptr++);
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
	    QLA8044_FLASH_FIRST_MS_PATTERN);

	ret = qla8044_poll_flash_status_reg(vha);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xb124,
		    "%s: Failed.\n", __func__);
		goto exit_func;
	}

	dwords--;

	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
	    QLA8044_FLASH_SECOND_TEMP_VAL);


	/* Second to N-1 DWORDS writes */
	while (dwords != 1) {
		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
		    QLA8044_FLASH_SECOND_MS_PATTERN);
		ret = qla8044_poll_flash_status_reg(vha);
		if (ret) {
			ql_log(ql_log_warn, vha, 0xb129,
			    "%s: Failed.\n", __func__);
			goto exit_func;
		}
		dwords--;
	}

	/* Commit the burst at the destination address (dword index). */
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
	    QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));

	/* Last DWORD write */
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
	    QLA8044_FLASH_LAST_MS_PATTERN);
	ret = qla8044_poll_flash_status_reg(vha);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xb12a,
		    "%s: Failed.\n", __func__);
		goto exit_func;
	}
	/* Check for an SPI error latched during the burst. */
	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);

	if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
		ql_log(ql_log_warn, vha, 0xb12b,
		    "%s: Failed.\n", __func__);
		spi_val = 0;
		/* Operation failed, clear error bit. */
		qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
		    &spi_val);
		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
		    spi_val | QLA8044_FLASH_SPI_CTL);
	}
exit_func:
	return ret;
}
3422
3423static int
3424qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
3425 uint32_t faddr, uint32_t dwords)
3426{
3427 int ret = QLA_FUNCTION_FAILED;
3428 uint32_t liter;
3429
3430 for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3431 ret = qla8044_flash_write_u32(vha, faddr, dwptr);
3432 if (ret) {
3433 ql_dbg(ql_dbg_p3p, vha, 0xb141,
3434 "%s: flash address=%x data=%x.\n", __func__,
3435 faddr, *dwptr);
3436 break;
3437 }
3438 }
3439
3440 return ret;
3441}
3442
3443int
3444qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3445 uint32_t offset, uint32_t length)
3446{
3447 int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
3448 int dword_count, erase_sec_count;
3449 uint32_t erase_offset;
3450 uint8_t *p_cache, *p_src;
3451
3452 erase_offset = offset;
3453
3454 p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
3455 if (!p_cache)
3456 return QLA_FUNCTION_FAILED;
3457
3458 memcpy(p_cache, buf, length);
3459 p_src = p_cache;
3460 dword_count = length / sizeof(uint32_t);
3461 /* Since the offset and legth are sector aligned, it will be always
3462 * multiple of burst_iter_count (64)
3463 */
3464 burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
3465 erase_sec_count = length / QLA8044_SECTOR_SIZE;
3466
3467 /* Suspend HBA. */
3468 scsi_block_requests(vha->host);
3469 /* Lock and enable write for whole operation. */
3470 qla8044_flash_lock(vha);
3471 qla8044_unprotect_flash(vha);
3472
3473 /* Erasing the sectors */
3474 for (i = 0; i < erase_sec_count; i++) {
3475 rval = qla8044_erase_flash_sector(vha, erase_offset);
3476 ql_dbg(ql_dbg_user, vha, 0xb138,
3477 "Done erase of sector=0x%x.\n",
3478 erase_offset);
3479 if (rval) {
3480 ql_log(ql_log_warn, vha, 0xb121,
3481 "Failed to erase the sector having address: "
3482 "0x%x.\n", erase_offset);
3483 goto out;
3484 }
3485 erase_offset += QLA8044_SECTOR_SIZE;
3486 }
3487 ql_dbg(ql_dbg_user, vha, 0xb13f,
3488 "Got write for addr = 0x%x length=0x%x.\n",
3489 offset, length);
3490
3491 for (i = 0; i < burst_iter_count; i++) {
3492
3493 /* Go with write. */
3494 rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
3495 offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
3496 if (rval) {
3497 /* Buffer Mode failed skip to dword mode */
3498 ql_log(ql_log_warn, vha, 0xb122,
3499 "Failed to write flash in buffer mode, "
3500 "Reverting to slow-write.\n");
3501 rval = qla8044_write_flash_dword_mode(vha,
3502 (uint32_t *)p_src, offset,
3503 QLA8044_MAX_OPTROM_BURST_DWORDS);
3504 }
3505 p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
3506 offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
3507 }
3508 ql_dbg(ql_dbg_user, vha, 0xb133,
3509 "Done writing.\n");
3510
3511out:
3512 qla8044_protect_flash(vha);
3513 qla8044_flash_unlock(vha);
3514 scsi_unblock_requests(vha->host);
3515 kfree(p_cache);
3516
3517 return rval;
3518}
3519
3520#define LEG_INT_PTR_B31 (1 << 31)
3521#define LEG_INT_PTR_B30 (1 << 30)
3522#define PF_BITS_MASK (0xF << 16)
/**
 * qla8044_intr_handler() - Process interrupts for the ISP8044
 * @irq: interrupt number (unused)
 * @dev_id: SCSI driver HA context (struct rsp_que pointer)
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla8044_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_82xx __iomem *reg;
	int status = 0;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint16_t mb[4];
	uint32_t leg_int_ptr = 0, pf_bit;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0xb143,
		    "%s(): NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	/* Device is being torn off the bus; nothing to service. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);

	/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
	if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
		ql_dbg(ql_dbg_p3p, vha, 0xb144,
		    "%s: Legacy Interrupt Bit 31 not set, "
		    "spurious interrupt!\n", __func__);
		return IRQ_NONE;
	}

	/* The interrupt line is shared; only service interrupts that are
	 * tagged with this PCIe function's number. */
	pf_bit = ha->portnum << 16;
	/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
	if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
		ql_dbg(ql_dbg_p3p, vha, 0xb145,
		    "%s: Incorrect function ID 0x%x in "
		    "legacy interrupt register, "
		    "ha->pf_bit = 0x%x\n", __func__,
		    (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
		return IRQ_NONE;
	}

	/* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
	 * Control register and poll till Legacy Interrupt Pointer register
	 * bit32 is 0.
	 */
	qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
	do {
		leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
		/* Pointer register now tagged for another function: done. */
		if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
			break;
	} while (leg_int_ptr & (LEG_INT_PTR_B30));

	reg = &ha->iobase->isp82;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Single pass (iter counts 1 -> 0); loop form kept for symmetry
	 * with the other qla2xxx interrupt handlers. */
	for (iter = 1; iter--; ) {

		if (RD_REG_DWORD(&reg->host_int)) {
			stat = RD_REG_DWORD(&reg->host_status);
			if ((stat & HSRX_RISC_INT) == 0)
				break;

			/* Low byte of host_status selects the event type. */
			switch (stat & 0xff) {
			case 0x1:
			case 0x2:
			case 0x10:
			case 0x11:
				/* Mailbox command completion. */
				qla82xx_mbx_completion(vha, MSW(stat));
				status |= MBX_INTERRUPT;
				break;
			case 0x12:
				/* Asynchronous event notification. */
				mb[0] = MSW(stat);
				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case 0x13:
				/* Response queue entries available. */
				qla24xx_process_response_queue(vha, rsp);
				break;
			default:
				ql_dbg(ql_dbg_p3p, vha, 0xb146,
				    "Unrecognized interrupt type "
				    "(%d).\n", stat & 0xff);
				break;
			}
		}
		/* Acknowledge / clear the host interrupt. */
		WRT_REG_DWORD(&reg->host_int, 0);
	}

	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
3632
3633static int
3634qla8044_idc_dontreset(struct qla_hw_data *ha)
3635{
3636 uint32_t idc_ctrl;
3637
3638 idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
3639 return idc_ctrl & DONTRESET_BIT0;
3640}
3641
3642static void
3643qla8044_clear_rst_ready(scsi_qla_host_t *vha)
3644{
3645 uint32_t drv_state;
3646
3647 drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
3648
3649 /*
3650 * For ISP8044, drv_active register has 1 bit per function,
3651 * shift 1 by func_num to set a bit for the function.
3652 * For ISP82xx, drv_active has 4 bits per function
3653 */
3654 drv_state &= ~(1 << vha->hw->portnum);
3655
3656 ql_dbg(ql_dbg_p3p, vha, 0xb13d,
3657 "drv_state: 0x%08x\n", drv_state);
3658 qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
3659}
3660
/*
 * qla8044_abort_isp - Reset the ISP8044 via the IDC (inter-driver
 * communication) protocol and restart it.
 * @vha: adapter context
 *
 * Moves the device to NEED_RESET (unless reset recovery is disabled via
 * the IDC_CTRL DONTRESET bit while the device is READY), lets the IDC
 * state machine run the recovery, then restarts the ISP on success.
 *
 * Returns QLA_SUCCESS on a completed reset + restart, an error otherwise.
 */
int
qla8044_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t dev_state;
	struct qla_hw_data *ha = vha->hw;

	qla8044_idc_lock(ha);
	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);

	/* Module parameter: advertise to other drivers that this function
	 * does not want the HBA reset. */
	if (ql2xdontresethba)
		qla8044_set_idc_dontreset(vha);

	/* If device_state is NEED_RESET, go ahead with
	 * Reset,irrespective of ql2xdontresethba. This is to allow a
	 * non-reset-owner to force a reset. Non-reset-owner sets
	 * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
	 * and then forces a Reset by setting device_state to
	 * NEED_RESET. */
	if (dev_state == QLA8XXX_DEV_READY) {
		/* If IDC_CTRL DONTRESETHBA_BIT0 is set don't do reset
		 * recovery */
		if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
			ql_dbg(ql_dbg_p3p, vha, 0xb13e,
			    "Reset recovery disabled\n");
			rval = QLA_FUNCTION_FAILED;
			goto exit_isp_reset;
		}

		ql_dbg(ql_dbg_p3p, vha, 0xb140,
		    "HW State: NEED RESET\n");
		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
		    QLA8XXX_DEV_NEED_RESET);
	}

	/* For ISP8044, Reset owner is NIC, iSCSI or FCOE based on priority
	 * and which drivers are present. Unlike ISP82XX, the function setting
	 * NEED_RESET, may not be the Reset owner. */
	qla83xx_reset_ownership(vha);

	/* The state handler may sleep/poll, so it runs without the IDC
	 * lock; re-take it only to clear our reset-ack bit. */
	qla8044_idc_unlock(ha);
	rval = qla8044_device_state_handler(vha);
	qla8044_idc_lock(ha);
	qla8044_clear_rst_ready(vha);

exit_isp_reset:
	qla8044_idc_unlock(ha);
	if (rval == QLA_SUCCESS) {
		/* Firmware is back; clear hang state and reinitialize. */
		ha->flags.isp82xx_fw_hung = 0;
		ha->flags.nic_core_reset_hdlr_active = 0;
		rval = qla82xx_restart_isp(vha);
	}

	return rval;
}
3716
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
new file mode 100644
index 000000000000..2ab2eabab908
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -0,0 +1,551 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7
8#ifndef __QLA_NX2_H
9#define __QLA_NX2_H
10
11#define QSNT_ACK_TOV 30
12#define INTENT_TO_RECOVER 0x01
13#define PROCEED_TO_RECOVER 0x02
14#define IDC_LOCK_RECOVERY_OWNER_MASK 0x3C
15#define IDC_LOCK_RECOVERY_STATE_MASK 0x3
16#define IDC_LOCK_RECOVERY_STATE_SHIFT_BITS 2
17
18#define QLA8044_DRV_LOCK_MSLEEP 200
19#define QLA8044_ADDR_DDR_NET (0x0000000000000000ULL)
20#define QLA8044_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
21
22#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0
23#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4
24#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0
25#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4
26#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8
27#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC
28#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8
29#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC
30
31/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
32#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
33#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE | \
34 MIU_TA_CTL_START)
35#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
36
37/* Imbus address bit used to indicate a host address. This bit is
38 * eliminated by the pcie bar and bar select before presentation
39 * over pcie. */
40/* host memory via IMBUS */
41#define QLA8044_P2_ADDR_PCIE (0x0000000800000000ULL)
42#define QLA8044_P3_ADDR_PCIE (0x0000008000000000ULL)
43#define QLA8044_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
44#define QLA8044_ADDR_OCM0 (0x0000000200000000ULL)
45#define QLA8044_ADDR_OCM0_MAX (0x00000002000fffffULL)
46#define QLA8044_ADDR_OCM1 (0x0000000200400000ULL)
47#define QLA8044_ADDR_OCM1_MAX (0x00000002004fffffULL)
48#define QLA8044_ADDR_QDR_NET (0x0000000300000000ULL)
49#define QLA8044_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
50#define QLA8044_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
51#define QLA8044_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
52#define QLA8044_PCI_CRBSPACE ((unsigned long)0x06000000)
53#define QLA8044_PCI_DIRECT_CRB ((unsigned long)0x04400000)
54#define QLA8044_PCI_CAMQM ((unsigned long)0x04800000)
55#define QLA8044_PCI_CAMQM_MAX ((unsigned long)0x04ffffff)
56#define QLA8044_PCI_DDR_NET ((unsigned long)0x00000000)
57#define QLA8044_PCI_QDR_NET ((unsigned long)0x04000000)
58#define QLA8044_PCI_QDR_NET_MAX ((unsigned long)0x043fffff)
59
60/* PCI Windowing for DDR regions. */
61#define QLA8044_ADDR_IN_RANGE(addr, low, high) \
62 (((addr) <= (high)) && ((addr) >= (low)))
63
64/* Indirectly Mapped Registers */
65#define QLA8044_FLASH_SPI_STATUS 0x2808E010
66#define QLA8044_FLASH_SPI_CONTROL 0x2808E014
67#define QLA8044_FLASH_STATUS 0x42100004
68#define QLA8044_FLASH_CONTROL 0x42110004
69#define QLA8044_FLASH_ADDR 0x42110008
70#define QLA8044_FLASH_WRDATA 0x4211000C
71#define QLA8044_FLASH_RDDATA 0x42110018
72#define QLA8044_FLASH_DIRECT_WINDOW 0x42110030
73#define QLA8044_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
74
75/* Flash access regs */
76#define QLA8044_FLASH_LOCK 0x3850
77#define QLA8044_FLASH_UNLOCK 0x3854
78#define QLA8044_FLASH_LOCK_ID 0x3500
79
80/* Driver Lock regs */
81#define QLA8044_DRV_LOCK 0x3868
82#define QLA8044_DRV_UNLOCK 0x386C
83#define QLA8044_DRV_LOCK_ID 0x3504
84#define QLA8044_DRV_LOCKRECOVERY 0x379C
85
86/* IDC version */
87#define QLA8044_IDC_VER_MAJ_VALUE 0x1
88#define QLA8044_IDC_VER_MIN_VALUE 0x0
89
90/* IDC Registers : Driver Coexistence Defines */
91#define QLA8044_CRB_IDC_VER_MAJOR 0x3780
92#define QLA8044_CRB_IDC_VER_MINOR 0x3798
93#define QLA8044_IDC_DRV_AUDIT 0x3794
94#define QLA8044_SRE_SHIM_CONTROL 0x0D200284
95#define QLA8044_PORT0_RXB_PAUSE_THRS 0x0B2003A4
96#define QLA8044_PORT1_RXB_PAUSE_THRS 0x0B2013A4
97#define QLA8044_PORT0_RXB_TC_MAX_CELL 0x0B200388
98#define QLA8044_PORT1_RXB_TC_MAX_CELL 0x0B201388
99#define QLA8044_PORT0_RXB_TC_STATS 0x0B20039C
100#define QLA8044_PORT1_RXB_TC_STATS 0x0B20139C
101#define QLA8044_PORT2_IFB_PAUSE_THRS 0x0B200704
102#define QLA8044_PORT3_IFB_PAUSE_THRS 0x0B201704
103
104/* set value to pause threshold value */
105#define QLA8044_SET_PAUSE_VAL 0x0
106#define QLA8044_SET_TC_MAX_CELL_VAL 0x03FF03FF
107#define QLA8044_PEG_HALT_STATUS1 0x34A8
108#define QLA8044_PEG_HALT_STATUS2 0x34AC
109#define QLA8044_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */
110#define QLA8044_FW_CAPABILITIES 0x3528
111#define QLA8044_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */
112#define QLA8044_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */
113#define QLA8044_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */
114#define QLA8044_CRB_DRV_SCRATCH 0x3548
115#define QLA8044_CRB_DEV_PART_INFO1 0x37E0
116#define QLA8044_CRB_DEV_PART_INFO2 0x37E4
117#define QLA8044_FW_VER_MAJOR 0x3550
118#define QLA8044_FW_VER_MINOR 0x3554
119#define QLA8044_FW_VER_SUB 0x3558
120#define QLA8044_NPAR_STATE 0x359C
121#define QLA8044_FW_IMAGE_VALID 0x35FC
122#define QLA8044_CMDPEG_STATE 0x3650
123#define QLA8044_ASIC_TEMP 0x37B4
124#define QLA8044_FW_API 0x356C
125#define QLA8044_DRV_OP_MODE 0x3570
126#define QLA8044_CRB_WIN_BASE 0x3800
127#define QLA8044_CRB_WIN_FUNC(f) (QLA8044_CRB_WIN_BASE+((f)*4))
128#define QLA8044_SEM_LOCK_BASE 0x3840
129#define QLA8044_SEM_UNLOCK_BASE 0x3844
130#define QLA8044_SEM_LOCK_FUNC(f) (QLA8044_SEM_LOCK_BASE+((f)*8))
131#define QLA8044_SEM_UNLOCK_FUNC(f) (QLA8044_SEM_UNLOCK_BASE+((f)*8))
132#define QLA8044_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
133#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
134#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
135#define QLA8044_LINK_SPEED_FACTOR 10
136
137/* FLASH API Defines */
138#define QLA8044_FLASH_MAX_WAIT_USEC 100
139#define QLA8044_FLASH_LOCK_TIMEOUT 10000
140#define QLA8044_FLASH_SECTOR_SIZE 65536
141#define QLA8044_DRV_LOCK_TIMEOUT 2000
142#define QLA8044_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
143#define QLA8044_FLASH_WRITE_CMD 0xdacdacda
144#define QLA8044_FLASH_BUFFER_WRITE_CMD 0xcadcadca
145#define QLA8044_FLASH_READ_RETRY_COUNT 2000
146#define QLA8044_FLASH_STATUS_READY 0x6
147#define QLA8044_FLASH_BUFFER_WRITE_MIN 2
148#define QLA8044_FLASH_BUFFER_WRITE_MAX 64
149#define QLA8044_FLASH_STATUS_REG_POLL_DELAY 1
150#define QLA8044_ERASE_MODE 1
151#define QLA8044_WRITE_MODE 2
152#define QLA8044_DWORD_WRITE_MODE 3
153#define QLA8044_GLOBAL_RESET 0x38CC
154#define QLA8044_WILDCARD 0x38F0
155#define QLA8044_INFORMANT 0x38FC
156#define QLA8044_HOST_MBX_CTRL 0x3038
157#define QLA8044_FW_MBX_CTRL 0x303C
158#define QLA8044_BOOTLOADER_ADDR 0x355C
159#define QLA8044_BOOTLOADER_SIZE 0x3560
160#define QLA8044_FW_IMAGE_ADDR 0x3564
161#define QLA8044_MBX_INTR_ENABLE 0x1000
162#define QLA8044_MBX_INTR_MASK 0x1200
163
164/* IDC Control Register bit defines */
165#define DONTRESET_BIT0 0x1
166#define GRACEFUL_RESET_BIT1 0x2
167
168/* ISP8044 PEG_HALT_STATUS1 bits */
169#define QLA8044_HALT_STATUS_INFORMATIONAL (0x1 << 29)
170#define QLA8044_HALT_STATUS_FW_RESET (0x2 << 29)
171#define QLA8044_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
172
173/* Firmware image definitions */
174#define QLA8044_BOOTLOADER_FLASH_ADDR 0x10000
175#define QLA8044_BOOT_FROM_FLASH 0
176#define QLA8044_IDC_PARAM_ADDR 0x3e8020
177
178/* FLASH related definitions */
179#define QLA8044_OPTROM_BURST_SIZE 0x100
180#define QLA8044_MAX_OPTROM_BURST_DWORDS (QLA8044_OPTROM_BURST_SIZE / 4)
181#define QLA8044_MIN_OPTROM_BURST_DWORDS 2
182#define QLA8044_SECTOR_SIZE (64 * 1024)
183
184#define QLA8044_FLASH_SPI_CTL 0x4
185#define QLA8044_FLASH_FIRST_TEMP_VAL 0x00800000
186#define QLA8044_FLASH_SECOND_TEMP_VAL 0x00800001
187#define QLA8044_FLASH_FIRST_MS_PATTERN 0x43
188#define QLA8044_FLASH_SECOND_MS_PATTERN 0x7F
189#define QLA8044_FLASH_LAST_MS_PATTERN 0x7D
190#define QLA8044_FLASH_STATUS_WRITE_DEF_SIG 0xFD0100
191#define QLA8044_FLASH_SECOND_ERASE_MS_VAL 0x5
192#define QLA8044_FLASH_ERASE_SIG 0xFD0300
193#define QLA8044_FLASH_LAST_ERASE_MS_VAL 0x3D
194
195/* Reset template definitions */
196#define QLA8044_MAX_RESET_SEQ_ENTRIES 16
197#define QLA8044_RESTART_TEMPLATE_SIZE 0x2000
198#define QLA8044_RESET_TEMPLATE_ADDR 0x4F0000
199#define QLA8044_RESET_SEQ_VERSION 0x0101
200
201/* Reset template entry opcodes */
202#define OPCODE_NOP 0x0000
203#define OPCODE_WRITE_LIST 0x0001
204#define OPCODE_READ_WRITE_LIST 0x0002
205#define OPCODE_POLL_LIST 0x0004
206#define OPCODE_POLL_WRITE_LIST 0x0008
207#define OPCODE_READ_MODIFY_WRITE 0x0010
208#define OPCODE_SEQ_PAUSE 0x0020
209#define OPCODE_SEQ_END 0x0040
210#define OPCODE_TMPL_END 0x0080
211#define OPCODE_POLL_READ_LIST 0x0100
212
213/* Template Header */
214#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE
215#define QLA8044_IDC_DRV_CTRL 0x3790
216#define AF_8044_NO_FW_DUMP 27 /* 0x08000000 */
217
218#define MINIDUMP_SIZE_36K 36864
219
220struct qla8044_reset_template_hdr {
221 uint16_t version;
222 uint16_t signature;
223 uint16_t size;
224 uint16_t entries;
225 uint16_t hdr_size;
226 uint16_t checksum;
227 uint16_t init_seq_offset;
228 uint16_t start_seq_offset;
229} __packed;
230
231/* Common Entry Header. */
232struct qla8044_reset_entry_hdr {
233 uint16_t cmd;
234 uint16_t size;
235 uint16_t count;
236 uint16_t delay;
237} __packed;
238
239/* Generic poll entry type. */
240struct qla8044_poll {
241 uint32_t test_mask;
242 uint32_t test_value;
243} __packed;
244
245/* Read modify write entry type. */
246struct qla8044_rmw {
247 uint32_t test_mask;
248 uint32_t xor_value;
249 uint32_t or_value;
250 uint8_t shl;
251 uint8_t shr;
252 uint8_t index_a;
253 uint8_t rsvd;
254} __packed;
255
256/* Generic Entry Item with 2 DWords. */
257struct qla8044_entry {
258 uint32_t arg1;
259 uint32_t arg2;
260} __packed;
261
262/* Generic Entry Item with 4 DWords.*/
263struct qla8044_quad_entry {
264 uint32_t dr_addr;
265 uint32_t dr_value;
266 uint32_t ar_addr;
267 uint32_t ar_value;
268} __packed;
269
270struct qla8044_reset_template {
271 int seq_index;
272 int seq_error;
273 int array_index;
274 uint32_t array[QLA8044_MAX_RESET_SEQ_ENTRIES];
275 uint8_t *buff;
276 uint8_t *stop_offset;
277 uint8_t *start_offset;
278 uint8_t *init_offset;
279 struct qla8044_reset_template_hdr *hdr;
280 uint8_t seq_end;
281 uint8_t template_end;
282};
283
284/* Driver_code is for driver to write some info about the entry
285 * currently not used.
286 */
287struct qla8044_minidump_entry_hdr {
288 uint32_t entry_type;
289 uint32_t entry_size;
290 uint32_t entry_capture_size;
291 struct {
292 uint8_t entry_capture_mask;
293 uint8_t entry_code;
294 uint8_t driver_code;
295 uint8_t driver_flags;
296 } d_ctrl;
297} __packed;
298
299/* Read CRB entry header */
300struct qla8044_minidump_entry_crb {
301 struct qla8044_minidump_entry_hdr h;
302 uint32_t addr;
303 struct {
304 uint8_t addr_stride;
305 uint8_t state_index_a;
306 uint16_t poll_timeout;
307 } crb_strd;
308 uint32_t data_size;
309 uint32_t op_count;
310
311 struct {
312 uint8_t opcode;
313 uint8_t state_index_v;
314 uint8_t shl;
315 uint8_t shr;
316 } crb_ctrl;
317
318 uint32_t value_1;
319 uint32_t value_2;
320 uint32_t value_3;
321} __packed;
322
323struct qla8044_minidump_entry_cache {
324 struct qla8044_minidump_entry_hdr h;
325 uint32_t tag_reg_addr;
326 struct {
327 uint16_t tag_value_stride;
328 uint16_t init_tag_value;
329 } addr_ctrl;
330 uint32_t data_size;
331 uint32_t op_count;
332 uint32_t control_addr;
333 struct {
334 uint16_t write_value;
335 uint8_t poll_mask;
336 uint8_t poll_wait;
337 } cache_ctrl;
338 uint32_t read_addr;
339 struct {
340 uint8_t read_addr_stride;
341 uint8_t read_addr_cnt;
342 uint16_t rsvd_1;
343 } read_ctrl;
344} __packed;
345
346/* Read OCM */
347struct qla8044_minidump_entry_rdocm {
348 struct qla8044_minidump_entry_hdr h;
349 uint32_t rsvd_0;
350 uint32_t rsvd_1;
351 uint32_t data_size;
352 uint32_t op_count;
353 uint32_t rsvd_2;
354 uint32_t rsvd_3;
355 uint32_t read_addr;
356 uint32_t read_addr_stride;
357} __packed;
358
359/* Read Memory */
360struct qla8044_minidump_entry_rdmem {
361 struct qla8044_minidump_entry_hdr h;
362 uint32_t rsvd[6];
363 uint32_t read_addr;
364 uint32_t read_data_size;
365};
366
367/* Read Memory: For Pex-DMA */
368struct qla8044_minidump_entry_rdmem_pex_dma {
369 struct qla8044_minidump_entry_hdr h;
370 uint32_t desc_card_addr;
371 uint16_t dma_desc_cmd;
372 uint8_t rsvd[2];
373 uint32_t start_dma_cmd;
374 uint8_t rsvd2[12];
375 uint32_t read_addr;
376 uint32_t read_data_size;
377} __packed;
378
379/* Read ROM */
380struct qla8044_minidump_entry_rdrom {
381 struct qla8044_minidump_entry_hdr h;
382 uint32_t rsvd[6];
383 uint32_t read_addr;
384 uint32_t read_data_size;
385} __packed;
386
387/* Mux entry */
388struct qla8044_minidump_entry_mux {
389 struct qla8044_minidump_entry_hdr h;
390 uint32_t select_addr;
391 uint32_t rsvd_0;
392 uint32_t data_size;
393 uint32_t op_count;
394 uint32_t select_value;
395 uint32_t select_value_stride;
396 uint32_t read_addr;
397 uint32_t rsvd_1;
398} __packed;
399
400/* Queue entry */
401struct qla8044_minidump_entry_queue {
402 struct qla8044_minidump_entry_hdr h;
403 uint32_t select_addr;
404 struct {
405 uint16_t queue_id_stride;
406 uint16_t rsvd_0;
407 } q_strd;
408 uint32_t data_size;
409 uint32_t op_count;
410 uint32_t rsvd_1;
411 uint32_t rsvd_2;
412 uint32_t read_addr;
413 struct {
414 uint8_t read_addr_stride;
415 uint8_t read_addr_cnt;
416 uint16_t rsvd_3;
417 } rd_strd;
418} __packed;
419
420/* POLLRD Entry */
421struct qla8044_minidump_entry_pollrd {
422 struct qla8044_minidump_entry_hdr h;
423 uint32_t select_addr;
424 uint32_t read_addr;
425 uint32_t select_value;
426 uint16_t select_value_stride;
427 uint16_t op_count;
428 uint32_t poll_wait;
429 uint32_t poll_mask;
430 uint32_t data_size;
431 uint32_t rsvd_1;
432} __packed;
433
434/* RDMUX2 Entry */
435struct qla8044_minidump_entry_rdmux2 {
436 struct qla8044_minidump_entry_hdr h;
437 uint32_t select_addr_1;
438 uint32_t select_addr_2;
439 uint32_t select_value_1;
440 uint32_t select_value_2;
441 uint32_t op_count;
442 uint32_t select_value_mask;
443 uint32_t read_addr;
444 uint8_t select_value_stride;
445 uint8_t data_size;
446 uint8_t rsvd[2];
447} __packed;
448
449/* POLLRDMWR Entry */
450struct qla8044_minidump_entry_pollrdmwr {
451 struct qla8044_minidump_entry_hdr h;
452 uint32_t addr_1;
453 uint32_t addr_2;
454 uint32_t value_1;
455 uint32_t value_2;
456 uint32_t poll_wait;
457 uint32_t poll_mask;
458 uint32_t modify_mask;
459 uint32_t data_size;
460} __packed;
461
462/* IDC additional information */
463struct qla8044_idc_information {
464 uint32_t request_desc; /* IDC request descriptor */
465 uint32_t info1; /* IDC additional info */
466 uint32_t info2; /* IDC additional info */
467 uint32_t info3; /* IDC additional info */
468} __packed;
469
470enum qla_regs {
471 QLA8044_PEG_HALT_STATUS1_INDEX = 0,
472 QLA8044_PEG_HALT_STATUS2_INDEX,
473 QLA8044_PEG_ALIVE_COUNTER_INDEX,
474 QLA8044_CRB_DRV_ACTIVE_INDEX,
475 QLA8044_CRB_DEV_STATE_INDEX,
476 QLA8044_CRB_DRV_STATE_INDEX,
477 QLA8044_CRB_DRV_SCRATCH_INDEX,
478 QLA8044_CRB_DEV_PART_INFO_INDEX,
479 QLA8044_CRB_DRV_IDC_VERSION_INDEX,
480 QLA8044_FW_VERSION_MAJOR_INDEX,
481 QLA8044_FW_VERSION_MINOR_INDEX,
482 QLA8044_FW_VERSION_SUB_INDEX,
483 QLA8044_CRB_CMDPEG_STATE_INDEX,
484 QLA8044_CRB_TEMP_STATE_INDEX,
485} __packed;
486
487#define CRB_REG_INDEX_MAX 14
488#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
489#define CRB_CMDPEG_CHECK_DELAY 500
490
491static const uint32_t qla8044_reg_tbl[] = {
492 QLA8044_PEG_HALT_STATUS1,
493 QLA8044_PEG_HALT_STATUS2,
494 QLA8044_PEG_ALIVE_COUNTER,
495 QLA8044_CRB_DRV_ACTIVE,
496 QLA8044_CRB_DEV_STATE,
497 QLA8044_CRB_DRV_STATE,
498 QLA8044_CRB_DRV_SCRATCH,
499 QLA8044_CRB_DEV_PART_INFO1,
500 QLA8044_CRB_IDC_VER_MAJOR,
501 QLA8044_FW_VER_MAJOR,
502 QLA8044_FW_VER_MINOR,
503 QLA8044_FW_VER_SUB,
504 QLA8044_CMDPEG_STATE,
505 QLA8044_ASIC_TEMP,
506};
507
508/* MiniDump Structures */
509
510/* Driver_code is for driver to write some info about the entry
511 * currently not used.
512 */
513#define QLA8044_SS_OCM_WNDREG_INDEX 3
514#define QLA8044_DBG_STATE_ARRAY_LEN 16
515#define QLA8044_DBG_CAP_SIZE_ARRAY_LEN 8
516#define QLA8044_DBG_RSVD_ARRAY_LEN 8
517#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16
518#define QLA8044_SS_PCI_INDEX 0
519
520struct qla8044_minidump_template_hdr {
521 uint32_t entry_type;
522 uint32_t first_entry_offset;
523 uint32_t size_of_template;
524 uint32_t capture_debug_level;
525 uint32_t num_of_entries;
526 uint32_t version;
527 uint32_t driver_timestamp;
528 uint32_t checksum;
529
530 uint32_t driver_capture_mask;
531 uint32_t driver_info_word2;
532 uint32_t driver_info_word3;
533 uint32_t driver_info_word4;
534
535 uint32_t saved_state_array[QLA8044_DBG_STATE_ARRAY_LEN];
536 uint32_t capture_size_array[QLA8044_DBG_CAP_SIZE_ARRAY_LEN];
537 uint32_t ocm_window_reg[QLA8044_DBG_OCM_WNDREG_ARRAY_LEN];
538};
539
540struct qla8044_pex_dma_descriptor {
541 struct {
542 uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
543 uint8_t rsvd[2];
544 uint16_t dma_desc_cmd;
545 } cmd;
546 uint64_t src_addr;
547 uint64_t dma_bus_addr; /*0-3: desc-cmd, 4-7: pci-func, 8-15: desc-cmd*/
548 uint8_t rsvd[24];
549} __packed;
550
551#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3e21e9fc9d91..9f01bbbf3a26 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1247,7 +1247,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1247 if (qla2x00_vp_abort_isp(vha)) 1247 if (qla2x00_vp_abort_isp(vha))
1248 goto eh_host_reset_lock; 1248 goto eh_host_reset_lock;
1249 } else { 1249 } else {
1250 if (IS_QLA82XX(vha->hw)) { 1250 if (IS_P3P_TYPE(vha->hw)) {
1251 if (!qla82xx_fcoe_ctx_reset(vha)) { 1251 if (!qla82xx_fcoe_ctx_reset(vha)) {
1252 /* Ctx reset success */ 1252 /* Ctx reset success */
1253 ret = SUCCESS; 1253 ret = SUCCESS;
@@ -1303,6 +1303,10 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1303 struct fc_port *fcport; 1303 struct fc_port *fcport;
1304 struct qla_hw_data *ha = vha->hw; 1304 struct qla_hw_data *ha = vha->hw;
1305 1305
1306 if (IS_QLAFX00(ha)) {
1307 return qlafx00_loop_reset(vha);
1308 }
1309
1306 if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) { 1310 if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
1307 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1311 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1308 if (fcport->port_type != FCT_TARGET) 1312 if (fcport->port_type != FCT_TARGET)
@@ -1311,14 +1315,12 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1311 ret = ha->isp_ops->target_reset(fcport, 0, 0); 1315 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1312 if (ret != QLA_SUCCESS) { 1316 if (ret != QLA_SUCCESS) {
1313 ql_dbg(ql_dbg_taskm, vha, 0x802c, 1317 ql_dbg(ql_dbg_taskm, vha, 0x802c,
1314 "Bus Reset failed: Target Reset=%d " 1318 "Bus Reset failed: Reset=%d "
1315 "d_id=%x.\n", ret, fcport->d_id.b24); 1319 "d_id=%x.\n", ret, fcport->d_id.b24);
1316 } 1320 }
1317 } 1321 }
1318 } 1322 }
1319 1323
1320 if (IS_QLAFX00(ha))
1321 return QLA_SUCCESS;
1322 1324
1323 if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { 1325 if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
1324 atomic_set(&vha->loop_state, LOOP_DOWN); 1326 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1506,7 +1508,7 @@ qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
1506 if (sdev->queue_depth > shost->cmd_per_lun) { 1508 if (sdev->queue_depth > shost->cmd_per_lun) {
1507 if (sdev->queue_depth < ha->cfg_lun_q_depth) 1509 if (sdev->queue_depth < ha->cfg_lun_q_depth)
1508 continue; 1510 continue;
1509 ql_log(ql_log_warn, vp, 0x3031, 1511 ql_dbg(ql_dbg_io, vp, 0x3031,
1510 "%ld:%d:%d: Ramping down queue depth to %d", 1512 "%ld:%d:%d: Ramping down queue depth to %d",
1511 vp->host_no, sdev->id, sdev->lun, 1513 vp->host_no, sdev->id, sdev->lun,
1512 ha->cfg_lun_q_depth); 1514 ha->cfg_lun_q_depth);
@@ -1911,7 +1913,7 @@ static struct isp_operations qla2300_isp_ops = {
1911 .get_flash_version = qla2x00_get_flash_version, 1913 .get_flash_version = qla2x00_get_flash_version,
1912 .start_scsi = qla2x00_start_scsi, 1914 .start_scsi = qla2x00_start_scsi,
1913 .abort_isp = qla2x00_abort_isp, 1915 .abort_isp = qla2x00_abort_isp,
1914 .iospace_config = qla2x00_iospace_config, 1916 .iospace_config = qla2x00_iospace_config,
1915 .initialize_adapter = qla2x00_initialize_adapter, 1917 .initialize_adapter = qla2x00_initialize_adapter,
1916}; 1918};
1917 1919
@@ -1949,7 +1951,7 @@ static struct isp_operations qla24xx_isp_ops = {
1949 .get_flash_version = qla24xx_get_flash_version, 1951 .get_flash_version = qla24xx_get_flash_version,
1950 .start_scsi = qla24xx_start_scsi, 1952 .start_scsi = qla24xx_start_scsi,
1951 .abort_isp = qla2x00_abort_isp, 1953 .abort_isp = qla2x00_abort_isp,
1952 .iospace_config = qla2x00_iospace_config, 1954 .iospace_config = qla2x00_iospace_config,
1953 .initialize_adapter = qla2x00_initialize_adapter, 1955 .initialize_adapter = qla2x00_initialize_adapter,
1954}; 1956};
1955 1957
@@ -1987,7 +1989,7 @@ static struct isp_operations qla25xx_isp_ops = {
1987 .get_flash_version = qla24xx_get_flash_version, 1989 .get_flash_version = qla24xx_get_flash_version,
1988 .start_scsi = qla24xx_dif_start_scsi, 1990 .start_scsi = qla24xx_dif_start_scsi,
1989 .abort_isp = qla2x00_abort_isp, 1991 .abort_isp = qla2x00_abort_isp,
1990 .iospace_config = qla2x00_iospace_config, 1992 .iospace_config = qla2x00_iospace_config,
1991 .initialize_adapter = qla2x00_initialize_adapter, 1993 .initialize_adapter = qla2x00_initialize_adapter,
1992}; 1994};
1993 1995
@@ -2025,7 +2027,7 @@ static struct isp_operations qla81xx_isp_ops = {
2025 .get_flash_version = qla24xx_get_flash_version, 2027 .get_flash_version = qla24xx_get_flash_version,
2026 .start_scsi = qla24xx_dif_start_scsi, 2028 .start_scsi = qla24xx_dif_start_scsi,
2027 .abort_isp = qla2x00_abort_isp, 2029 .abort_isp = qla2x00_abort_isp,
2028 .iospace_config = qla2x00_iospace_config, 2030 .iospace_config = qla2x00_iospace_config,
2029 .initialize_adapter = qla2x00_initialize_adapter, 2031 .initialize_adapter = qla2x00_initialize_adapter,
2030}; 2032};
2031 2033
@@ -2060,13 +2062,51 @@ static struct isp_operations qla82xx_isp_ops = {
2060 .beacon_blink = NULL, 2062 .beacon_blink = NULL,
2061 .read_optrom = qla82xx_read_optrom_data, 2063 .read_optrom = qla82xx_read_optrom_data,
2062 .write_optrom = qla82xx_write_optrom_data, 2064 .write_optrom = qla82xx_write_optrom_data,
2063 .get_flash_version = qla24xx_get_flash_version, 2065 .get_flash_version = qla82xx_get_flash_version,
2064 .start_scsi = qla82xx_start_scsi, 2066 .start_scsi = qla82xx_start_scsi,
2065 .abort_isp = qla82xx_abort_isp, 2067 .abort_isp = qla82xx_abort_isp,
2066 .iospace_config = qla82xx_iospace_config, 2068 .iospace_config = qla82xx_iospace_config,
2067 .initialize_adapter = qla2x00_initialize_adapter, 2069 .initialize_adapter = qla2x00_initialize_adapter,
2068}; 2070};
2069 2071
2072static struct isp_operations qla8044_isp_ops = {
2073 .pci_config = qla82xx_pci_config,
2074 .reset_chip = qla82xx_reset_chip,
2075 .chip_diag = qla24xx_chip_diag,
2076 .config_rings = qla82xx_config_rings,
2077 .reset_adapter = qla24xx_reset_adapter,
2078 .nvram_config = qla81xx_nvram_config,
2079 .update_fw_options = qla24xx_update_fw_options,
2080 .load_risc = qla82xx_load_risc,
2081 .pci_info_str = qla24xx_pci_info_str,
2082 .fw_version_str = qla24xx_fw_version_str,
2083 .intr_handler = qla8044_intr_handler,
2084 .enable_intrs = qla82xx_enable_intrs,
2085 .disable_intrs = qla82xx_disable_intrs,
2086 .abort_command = qla24xx_abort_command,
2087 .target_reset = qla24xx_abort_target,
2088 .lun_reset = qla24xx_lun_reset,
2089 .fabric_login = qla24xx_login_fabric,
2090 .fabric_logout = qla24xx_fabric_logout,
2091 .calc_req_entries = NULL,
2092 .build_iocbs = NULL,
2093 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2094 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2095 .read_nvram = NULL,
2096 .write_nvram = NULL,
2097 .fw_dump = qla24xx_fw_dump,
2098 .beacon_on = qla82xx_beacon_on,
2099 .beacon_off = qla82xx_beacon_off,
2100 .beacon_blink = NULL,
2101 .read_optrom = qla82xx_read_optrom_data,
2102 .write_optrom = qla8044_write_optrom_data,
2103 .get_flash_version = qla82xx_get_flash_version,
2104 .start_scsi = qla82xx_start_scsi,
2105 .abort_isp = qla8044_abort_isp,
2106 .iospace_config = qla82xx_iospace_config,
2107 .initialize_adapter = qla2x00_initialize_adapter,
2108};
2109
2070static struct isp_operations qla83xx_isp_ops = { 2110static struct isp_operations qla83xx_isp_ops = {
2071 .pci_config = qla25xx_pci_config, 2111 .pci_config = qla25xx_pci_config,
2072 .reset_chip = qla24xx_reset_chip, 2112 .reset_chip = qla24xx_reset_chip,
@@ -2237,6 +2277,14 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
2237 /* Initialize 82XX ISP flags */ 2277 /* Initialize 82XX ISP flags */
2238 qla82xx_init_flags(ha); 2278 qla82xx_init_flags(ha);
2239 break; 2279 break;
2280 case PCI_DEVICE_ID_QLOGIC_ISP8044:
2281 ha->device_type |= DT_ISP8044;
2282 ha->device_type |= DT_ZIO_SUPPORTED;
2283 ha->device_type |= DT_FWI2;
2284 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2285 /* Initialize 82XX ISP flags */
2286 qla82xx_init_flags(ha);
2287 break;
2240 case PCI_DEVICE_ID_QLOGIC_ISP2031: 2288 case PCI_DEVICE_ID_QLOGIC_ISP2031:
2241 ha->device_type |= DT_ISP2031; 2289 ha->device_type |= DT_ISP2031;
2242 ha->device_type |= DT_ZIO_SUPPORTED; 2290 ha->device_type |= DT_ZIO_SUPPORTED;
@@ -2317,7 +2365,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2317 uint16_t req_length = 0, rsp_length = 0; 2365 uint16_t req_length = 0, rsp_length = 0;
2318 struct req_que *req = NULL; 2366 struct req_que *req = NULL;
2319 struct rsp_que *rsp = NULL; 2367 struct rsp_que *rsp = NULL;
2320
2321 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 2368 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
2322 sht = &qla2xxx_driver_template; 2369 sht = &qla2xxx_driver_template;
2323 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || 2370 if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2330,7 +2377,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2330 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || 2377 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
2331 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || 2378 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
2332 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || 2379 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
2333 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001) { 2380 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
2381 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044) {
2334 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2382 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2335 mem_only = 1; 2383 mem_only = 1;
2336 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2384 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2484,6 +2532,21 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2484 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2532 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2485 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2533 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2486 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2534 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2535 } else if (IS_QLA8044(ha)) {
2536 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2537 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2538 req_length = REQUEST_ENTRY_CNT_82XX;
2539 rsp_length = RESPONSE_ENTRY_CNT_82XX;
2540 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2541 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2542 ha->gid_list_info_size = 8;
2543 ha->optrom_size = OPTROM_SIZE_83XX;
2544 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2545 ha->isp_ops = &qla8044_isp_ops;
2546 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
2547 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2548 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2549 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2487 } else if (IS_QLA83XX(ha)) { 2550 } else if (IS_QLA83XX(ha)) {
2488 ha->portnum = PCI_FUNC(ha->pdev->devfn); 2551 ha->portnum = PCI_FUNC(ha->pdev->devfn);
2489 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2552 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
@@ -2512,6 +2575,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2512 ha->port_down_retry_count = 30; /* default value */ 2575 ha->port_down_retry_count = 30; /* default value */
2513 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; 2576 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
2514 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; 2577 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
2578 ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
2515 ha->mr.fw_hbt_en = 1; 2579 ha->mr.fw_hbt_en = 1;
2516 } 2580 }
2517 2581
@@ -2676,7 +2740,7 @@ que_init:
2676 rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; 2740 rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
2677 } 2741 }
2678 2742
2679 if (IS_QLA82XX(ha)) { 2743 if (IS_P3P_TYPE(ha)) {
2680 req->req_q_out = &ha->iobase->isp82.req_q_out[0]; 2744 req->req_q_out = &ha->iobase->isp82.req_q_out[0];
2681 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; 2745 rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
2682 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 2746 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
@@ -2709,6 +2773,14 @@ que_init:
2709 qla82xx_idc_unlock(ha); 2773 qla82xx_idc_unlock(ha);
2710 ql_log(ql_log_fatal, base_vha, 0x00d7, 2774 ql_log(ql_log_fatal, base_vha, 0x00d7,
2711 "HW State: FAILED.\n"); 2775 "HW State: FAILED.\n");
2776 } else if (IS_QLA8044(ha)) {
2777 qla8044_idc_lock(ha);
2778 qla8044_wr_direct(base_vha,
2779 QLA8044_CRB_DEV_STATE_INDEX,
2780 QLA8XXX_DEV_FAILED);
2781 qla8044_idc_unlock(ha);
2782 ql_log(ql_log_fatal, base_vha, 0x0150,
2783 "HW State: FAILED.\n");
2712 } 2784 }
2713 2785
2714 ret = -ENODEV; 2786 ret = -ENODEV;
@@ -2804,6 +2876,13 @@ skip_dpc:
2804 2876
2805 ha->isp_ops->enable_intrs(ha); 2877 ha->isp_ops->enable_intrs(ha);
2806 2878
2879 if (IS_QLAFX00(ha)) {
2880 ret = qlafx00_fx_disc(base_vha,
2881 &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
2882 host->sg_tablesize = (ha->mr.extended_io_enabled) ?
2883 QLA_SG_ALL : 128;
2884 }
2885
2807 ret = scsi_add_host(host, &pdev->dev); 2886 ret = scsi_add_host(host, &pdev->dev);
2808 if (ret) 2887 if (ret)
2809 goto probe_failed; 2888 goto probe_failed;
@@ -2824,9 +2903,6 @@ skip_dpc:
2824 2903
2825 if (IS_QLAFX00(ha)) { 2904 if (IS_QLAFX00(ha)) {
2826 ret = qlafx00_fx_disc(base_vha, 2905 ret = qlafx00_fx_disc(base_vha,
2827 &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
2828
2829 ret = qlafx00_fx_disc(base_vha,
2830 &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); 2906 &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
2831 2907
2832 /* Register system information */ 2908 /* Register system information */
@@ -2881,8 +2957,13 @@ probe_hw_failed:
2881 qla82xx_clear_drv_active(ha); 2957 qla82xx_clear_drv_active(ha);
2882 qla82xx_idc_unlock(ha); 2958 qla82xx_idc_unlock(ha);
2883 } 2959 }
2960 if (IS_QLA8044(ha)) {
2961 qla8044_idc_lock(ha);
2962 qla8044_clear_drv_active(base_vha);
2963 qla8044_idc_unlock(ha);
2964 }
2884iospace_config_failed: 2965iospace_config_failed:
2885 if (IS_QLA82XX(ha)) { 2966 if (IS_P3P_TYPE(ha)) {
2886 if (!ha->nx_pcibase) 2967 if (!ha->nx_pcibase)
2887 iounmap((device_reg_t __iomem *)ha->nx_pcibase); 2968 iounmap((device_reg_t __iomem *)ha->nx_pcibase);
2888 if (!ql2xdbwr) 2969 if (!ql2xdbwr)
@@ -2930,6 +3011,10 @@ qla2x00_shutdown(struct pci_dev *pdev)
2930 vha = pci_get_drvdata(pdev); 3011 vha = pci_get_drvdata(pdev);
2931 ha = vha->hw; 3012 ha = vha->hw;
2932 3013
3014 /* Notify ISPFX00 firmware */
3015 if (IS_QLAFX00(ha))
3016 qlafx00_driver_shutdown(vha, 20);
3017
2933 /* Turn-off FCE trace */ 3018 /* Turn-off FCE trace */
2934 if (ha->flags.fce_enabled) { 3019 if (ha->flags.fce_enabled) {
2935 qla2x00_disable_fce_trace(vha, NULL, NULL); 3020 qla2x00_disable_fce_trace(vha, NULL, NULL);
@@ -2977,6 +3062,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
2977 ha->flags.host_shutting_down = 1; 3062 ha->flags.host_shutting_down = 1;
2978 3063
2979 set_bit(UNLOADING, &base_vha->dpc_flags); 3064 set_bit(UNLOADING, &base_vha->dpc_flags);
3065 if (IS_QLAFX00(ha))
3066 qlafx00_driver_shutdown(base_vha, 20);
3067
2980 mutex_lock(&ha->vport_lock); 3068 mutex_lock(&ha->vport_lock);
2981 while (ha->cur_vport_count) { 3069 while (ha->cur_vport_count) {
2982 spin_lock_irqsave(&ha->vport_slock, flags); 3070 spin_lock_irqsave(&ha->vport_slock, flags);
@@ -3061,6 +3149,11 @@ qla2x00_remove_one(struct pci_dev *pdev)
3061 3149
3062 scsi_host_put(base_vha->host); 3150 scsi_host_put(base_vha->host);
3063 3151
3152 if (IS_QLA8044(ha)) {
3153 qla8044_idc_lock(ha);
3154 qla8044_clear_drv_active(base_vha);
3155 qla8044_idc_unlock(ha);
3156 }
3064 if (IS_QLA82XX(ha)) { 3157 if (IS_QLA82XX(ha)) {
3065 qla82xx_idc_lock(ha); 3158 qla82xx_idc_lock(ha);
3066 qla82xx_clear_drv_active(ha); 3159 qla82xx_clear_drv_active(ha);
@@ -3210,14 +3303,8 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
3210 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 3303 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3211 3304
3212 ql_dbg(ql_dbg_disc, vha, 0x2067, 3305 ql_dbg(ql_dbg_disc, vha, 0x2067,
3213 "Port login retry " 3306 "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
3214 "%02x%02x%02x%02x%02x%02x%02x%02x, " 3307 fcport->port_name, fcport->loop_id, fcport->login_retry);
3215 "id = 0x%04x retry cnt=%d.\n",
3216 fcport->port_name[0], fcport->port_name[1],
3217 fcport->port_name[2], fcport->port_name[3],
3218 fcport->port_name[4], fcport->port_name[5],
3219 fcport->port_name[6], fcport->port_name[7],
3220 fcport->loop_id, fcport->login_retry);
3221 } 3308 }
3222} 3309}
3223 3310
@@ -3290,7 +3377,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3290 if (!ha->srb_mempool) 3377 if (!ha->srb_mempool)
3291 goto fail_free_gid_list; 3378 goto fail_free_gid_list;
3292 3379
3293 if (IS_QLA82XX(ha)) { 3380 if (IS_P3P_TYPE(ha)) {
3294 /* Allocate cache for CT6 Ctx. */ 3381 /* Allocate cache for CT6 Ctx. */
3295 if (!ctx_cachep) { 3382 if (!ctx_cachep) {
3296 ctx_cachep = kmem_cache_create("qla2xxx_ctx", 3383 ctx_cachep = kmem_cache_create("qla2xxx_ctx",
@@ -3324,7 +3411,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3324 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", 3411 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
3325 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); 3412 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
3326 3413
3327 if (IS_QLA82XX(ha) || ql2xenabledif) { 3414 if (IS_P3P_TYPE(ha) || ql2xenabledif) {
3328 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 3415 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
3329 DSD_LIST_DMA_POOL_SIZE, 8, 0); 3416 DSD_LIST_DMA_POOL_SIZE, 8, 0);
3330 if (!ha->dl_dma_pool) { 3417 if (!ha->dl_dma_pool) {
@@ -3532,7 +3619,7 @@ fail:
3532* Frees fw dump stuff. 3619* Frees fw dump stuff.
3533* 3620*
3534* Input: 3621* Input:
3535* ha = adapter block pointer. 3622* ha = adapter block pointer
3536*/ 3623*/
3537static void 3624static void
3538qla2x00_free_fw_dump(struct qla_hw_data *ha) 3625qla2x00_free_fw_dump(struct qla_hw_data *ha)
@@ -4699,17 +4786,33 @@ qla2x00_do_dpc(void *data)
4699 4786
4700 qla2x00_do_work(base_vha); 4787 qla2x00_do_work(base_vha);
4701 4788
4702 if (IS_QLA82XX(ha)) { 4789 if (IS_P3P_TYPE(ha)) {
4703 if (test_and_clear_bit(ISP_UNRECOVERABLE, 4790 if (IS_QLA8044(ha)) {
4704 &base_vha->dpc_flags)) { 4791 if (test_and_clear_bit(ISP_UNRECOVERABLE,
4705 qla82xx_idc_lock(ha); 4792 &base_vha->dpc_flags)) {
4706 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4793 qla8044_idc_lock(ha);
4707 QLA8XXX_DEV_FAILED); 4794 qla8044_wr_direct(base_vha,
4708 qla82xx_idc_unlock(ha); 4795 QLA8044_CRB_DEV_STATE_INDEX,
4709 ql_log(ql_log_info, base_vha, 0x4004, 4796 QLA8XXX_DEV_FAILED);
4710 "HW State: FAILED.\n"); 4797 qla8044_idc_unlock(ha);
4711 qla82xx_device_state_handler(base_vha); 4798 ql_log(ql_log_info, base_vha, 0x4004,
4712 continue; 4799 "HW State: FAILED.\n");
4800 qla8044_device_state_handler(base_vha);
4801 continue;
4802 }
4803
4804 } else {
4805 if (test_and_clear_bit(ISP_UNRECOVERABLE,
4806 &base_vha->dpc_flags)) {
4807 qla82xx_idc_lock(ha);
4808 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4809 QLA8XXX_DEV_FAILED);
4810 qla82xx_idc_unlock(ha);
4811 ql_log(ql_log_info, base_vha, 0x0151,
4812 "HW State: FAILED.\n");
4813 qla82xx_device_state_handler(base_vha);
4814 continue;
4815 }
4713 } 4816 }
4714 4817
4715 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 4818 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
@@ -4809,16 +4912,26 @@ qla2x00_do_dpc(void *data)
4809 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 4912 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
4810 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 4913 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
4811 "Quiescence mode scheduled.\n"); 4914 "Quiescence mode scheduled.\n");
4812 if (IS_QLA82XX(ha)) { 4915 if (IS_P3P_TYPE(ha)) {
4813 qla82xx_device_state_handler(base_vha); 4916 if (IS_QLA82XX(ha))
4917 qla82xx_device_state_handler(base_vha);
4918 if (IS_QLA8044(ha))
4919 qla8044_device_state_handler(base_vha);
4814 clear_bit(ISP_QUIESCE_NEEDED, 4920 clear_bit(ISP_QUIESCE_NEEDED,
4815 &base_vha->dpc_flags); 4921 &base_vha->dpc_flags);
4816 if (!ha->flags.quiesce_owner) { 4922 if (!ha->flags.quiesce_owner) {
4817 qla2x00_perform_loop_resync(base_vha); 4923 qla2x00_perform_loop_resync(base_vha);
4818 4924 if (IS_QLA82XX(ha)) {
4819 qla82xx_idc_lock(ha); 4925 qla82xx_idc_lock(ha);
4820 qla82xx_clear_qsnt_ready(base_vha); 4926 qla82xx_clear_qsnt_ready(
4821 qla82xx_idc_unlock(ha); 4927 base_vha);
4928 qla82xx_idc_unlock(ha);
4929 } else if (IS_QLA8044(ha)) {
4930 qla8044_idc_lock(ha);
4931 qla8044_clear_qsnt_ready(
4932 base_vha);
4933 qla8044_idc_unlock(ha);
4934 }
4822 } 4935 }
4823 } else { 4936 } else {
4824 clear_bit(ISP_QUIESCE_NEEDED, 4937 clear_bit(ISP_QUIESCE_NEEDED,
@@ -4992,10 +5105,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
4992 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 5105 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
4993 5106
4994 /* Make sure qla82xx_watchdog is run only for physical port */ 5107 /* Make sure qla82xx_watchdog is run only for physical port */
4995 if (!vha->vp_idx && IS_QLA82XX(ha)) { 5108 if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
4996 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) 5109 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
4997 start_dpc++; 5110 start_dpc++;
4998 qla82xx_watchdog(vha); 5111 if (IS_QLA82XX(ha))
5112 qla82xx_watchdog(vha);
5113 else if (IS_QLA8044(ha))
5114 qla8044_watchdog(vha);
4999 } 5115 }
5000 5116
5001 if (!vha->vp_idx && IS_QLAFX00(ha)) 5117 if (!vha->vp_idx && IS_QLAFX00(ha))
@@ -5075,7 +5191,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
5075 /* Check if beacon LED needs to be blinked for physical host only */ 5191 /* Check if beacon LED needs to be blinked for physical host only */
5076 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 5192 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
5077 /* There is no beacon_blink function for ISP82xx */ 5193 /* There is no beacon_blink function for ISP82xx */
5078 if (!IS_QLA82XX(ha)) { 5194 if (!IS_P3P_TYPE(ha)) {
5079 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 5195 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
5080 start_dpc++; 5196 start_dpc++;
5081 } 5197 }
@@ -5519,6 +5635,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
5519 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 5635 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
5520 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, 5636 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
5521 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, 5637 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
5638 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
5522 { 0 }, 5639 { 0 },
5523}; 5640};
5524MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 5641MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3bef6736d885..bd56cde795fc 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -565,7 +565,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
565 *start = FA_FLASH_LAYOUT_ADDR; 565 *start = FA_FLASH_LAYOUT_ADDR;
566 else if (IS_QLA81XX(ha)) 566 else if (IS_QLA81XX(ha))
567 *start = FA_FLASH_LAYOUT_ADDR_81; 567 *start = FA_FLASH_LAYOUT_ADDR_81;
568 else if (IS_QLA82XX(ha)) { 568 else if (IS_P3P_TYPE(ha)) {
569 *start = FA_FLASH_LAYOUT_ADDR_82; 569 *start = FA_FLASH_LAYOUT_ADDR_82;
570 goto end; 570 goto end;
571 } else if (IS_QLA83XX(ha)) { 571 } else if (IS_QLA83XX(ha)) {
@@ -719,7 +719,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
719 start = le32_to_cpu(region->start) >> 2; 719 start = le32_to_cpu(region->start) >> 2;
720 ql_dbg(ql_dbg_init, vha, 0x0049, 720 ql_dbg(ql_dbg_init, vha, 0x0049,
721 "FLT[%02x]: start=0x%x " 721 "FLT[%02x]: start=0x%x "
722 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), 722 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff,
723 start, le32_to_cpu(region->end) >> 2, 723 start, le32_to_cpu(region->end) >> 2,
724 le32_to_cpu(region->size)); 724 le32_to_cpu(region->size));
725 725
@@ -741,13 +741,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
741 if (IS_QLA8031(ha)) 741 if (IS_QLA8031(ha))
742 break; 742 break;
743 ha->flt_region_vpd_nvram = start; 743 ha->flt_region_vpd_nvram = start;
744 if (IS_QLA82XX(ha)) 744 if (IS_P3P_TYPE(ha))
745 break; 745 break;
746 if (ha->flags.port0) 746 if (ha->flags.port0)
747 ha->flt_region_vpd = start; 747 ha->flt_region_vpd = start;
748 break; 748 break;
749 case FLT_REG_VPD_1: 749 case FLT_REG_VPD_1:
750 if (IS_QLA82XX(ha) || IS_QLA8031(ha)) 750 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
751 break; 751 break;
752 if (!ha->flags.port0) 752 if (!ha->flags.port0)
753 ha->flt_region_vpd = start; 753 ha->flt_region_vpd = start;
@@ -789,9 +789,17 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
789 case FLT_REG_BOOT_CODE_82XX: 789 case FLT_REG_BOOT_CODE_82XX:
790 ha->flt_region_boot = start; 790 ha->flt_region_boot = start;
791 break; 791 break;
792 case FLT_REG_BOOT_CODE_8044:
793 if (IS_QLA8044(ha))
794 ha->flt_region_boot = start;
795 break;
792 case FLT_REG_FW_82XX: 796 case FLT_REG_FW_82XX:
793 ha->flt_region_fw = start; 797 ha->flt_region_fw = start;
794 break; 798 break;
799 case FLT_REG_CNA_FW:
800 if (IS_CNA_CAPABLE(ha))
801 ha->flt_region_fw = start;
802 break;
795 case FLT_REG_GOLD_FW_82XX: 803 case FLT_REG_GOLD_FW_82XX:
796 ha->flt_region_gold_fw = start; 804 ha->flt_region_gold_fw = start;
797 break; 805 break;
@@ -803,13 +811,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
803 ha->flt_region_vpd = start; 811 ha->flt_region_vpd = start;
804 break; 812 break;
805 case FLT_REG_FCOE_NVRAM_0: 813 case FLT_REG_FCOE_NVRAM_0:
806 if (!IS_QLA8031(ha)) 814 if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
807 break; 815 break;
808 if (ha->flags.port0) 816 if (ha->flags.port0)
809 ha->flt_region_nvram = start; 817 ha->flt_region_nvram = start;
810 break; 818 break;
811 case FLT_REG_FCOE_NVRAM_1: 819 case FLT_REG_FCOE_NVRAM_1:
812 if (!IS_QLA8031(ha)) 820 if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
813 break; 821 break;
814 if (!ha->flags.port0) 822 if (!ha->flags.port0)
815 ha->flt_region_nvram = start; 823 ha->flt_region_nvram = start;
@@ -883,7 +891,13 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
883 mid = le16_to_cpu(fdt->man_id); 891 mid = le16_to_cpu(fdt->man_id);
884 fid = le16_to_cpu(fdt->id); 892 fid = le16_to_cpu(fdt->id);
885 ha->fdt_wrt_disable = fdt->wrt_disable_bits; 893 ha->fdt_wrt_disable = fdt->wrt_disable_bits;
886 ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0300 | fdt->erase_cmd); 894 ha->fdt_wrt_enable = fdt->wrt_enable_bits;
895 ha->fdt_wrt_sts_reg_cmd = fdt->wrt_sts_reg_cmd;
896 if (IS_QLA8044(ha))
897 ha->fdt_erase_cmd = fdt->erase_cmd;
898 else
899 ha->fdt_erase_cmd =
900 flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
887 ha->fdt_block_size = le32_to_cpu(fdt->block_size); 901 ha->fdt_block_size = le32_to_cpu(fdt->block_size);
888 if (fdt->unprotect_sec_cmd) { 902 if (fdt->unprotect_sec_cmd) {
889 ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 | 903 ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
@@ -895,7 +909,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
895 goto done; 909 goto done;
896no_flash_data: 910no_flash_data:
897 loc = locations[0]; 911 loc = locations[0];
898 if (IS_QLA82XX(ha)) { 912 if (IS_P3P_TYPE(ha)) {
899 ha->fdt_block_size = FLASH_BLK_SIZE_64K; 913 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
900 goto done; 914 goto done;
901 } 915 }
@@ -946,7 +960,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
946 struct qla_hw_data *ha = vha->hw; 960 struct qla_hw_data *ha = vha->hw;
947 struct req_que *req = ha->req_q_map[0]; 961 struct req_que *req = ha->req_q_map[0];
948 962
949 if (!IS_QLA82XX(ha)) 963 if (!(IS_P3P_TYPE(ha)))
950 return; 964 return;
951 965
952 wptr = (uint32_t *)req->ring; 966 wptr = (uint32_t *)req->ring;
@@ -1008,6 +1022,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
1008 if (ha->flags.nic_core_reset_hdlr_active) 1022 if (ha->flags.nic_core_reset_hdlr_active)
1009 return; 1023 return;
1010 1024
1025 if (IS_QLA8044(ha))
1026 return;
1027
1011 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, 1028 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
1012 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); 1029 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
1013 if (hdr.version == __constant_cpu_to_le16(0xffff)) 1030 if (hdr.version == __constant_cpu_to_le16(0xffff))
@@ -1302,7 +1319,7 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1302 uint32_t *dwptr; 1319 uint32_t *dwptr;
1303 struct qla_hw_data *ha = vha->hw; 1320 struct qla_hw_data *ha = vha->hw;
1304 1321
1305 if (IS_QLA82XX(ha)) 1322 if (IS_P3P_TYPE(ha))
1306 return buf; 1323 return buf;
1307 1324
1308 /* Dword reads to flash. */ 1325 /* Dword reads to flash. */
@@ -1360,7 +1377,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1360 1377
1361 ret = QLA_SUCCESS; 1378 ret = QLA_SUCCESS;
1362 1379
1363 if (IS_QLA82XX(ha)) 1380 if (IS_P3P_TYPE(ha))
1364 return ret; 1381 return ret;
1365 1382
1366 /* Enable flash write. */ 1383 /* Enable flash write. */
@@ -1474,7 +1491,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
1474 struct qla_hw_data *ha = vha->hw; 1491 struct qla_hw_data *ha = vha->hw;
1475 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1492 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1476 1493
1477 if (IS_QLA82XX(ha)) 1494 if (IS_P3P_TYPE(ha))
1478 return; 1495 return;
1479 1496
1480 spin_lock_irqsave(&ha->hardware_lock, flags); 1497 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1752,7 +1769,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
1752 struct qla_hw_data *ha = vha->hw; 1769 struct qla_hw_data *ha = vha->hw;
1753 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1770 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1754 1771
1755 if (IS_QLA82XX(ha)) 1772 if (IS_P3P_TYPE(ha))
1756 return QLA_SUCCESS; 1773 return QLA_SUCCESS;
1757 1774
1758 if (IS_QLA8031(ha) || IS_QLA81XX(ha)) 1775 if (IS_QLA8031(ha) || IS_QLA81XX(ha))
@@ -1804,7 +1821,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
1804 struct qla_hw_data *ha = vha->hw; 1821 struct qla_hw_data *ha = vha->hw;
1805 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1822 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1806 1823
1807 if (IS_QLA82XX(ha)) 1824 if (IS_P3P_TYPE(ha))
1808 return QLA_SUCCESS; 1825 return QLA_SUCCESS;
1809 1826
1810 ha->beacon_blink_led = 0; 1827 ha->beacon_blink_led = 0;
@@ -2822,6 +2839,121 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2822} 2839}
2823 2840
2824int 2841int
2842qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2843{
2844 int ret = QLA_SUCCESS;
2845 uint32_t pcihdr, pcids;
2846 uint32_t *dcode;
2847 uint8_t *bcode;
2848 uint8_t code_type, last_image;
2849 struct qla_hw_data *ha = vha->hw;
2850
2851 if (!mbuf)
2852 return QLA_FUNCTION_FAILED;
2853
2854 memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
2855 memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
2856 memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
2857 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
2858
2859 dcode = mbuf;
2860
2861 /* Begin with first PCI expansion ROM header. */
2862 pcihdr = ha->flt_region_boot << 2;
2863 last_image = 1;
2864 do {
2865 /* Verify PCI expansion ROM header. */
2866 ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr,
2867 0x20 * 4);
2868 bcode = mbuf + (pcihdr % 4);
2869 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
2870 /* No signature */
2871 ql_log(ql_log_fatal, vha, 0x0154,
2872 "No matching ROM signature.\n");
2873 ret = QLA_FUNCTION_FAILED;
2874 break;
2875 }
2876
2877 /* Locate PCI data structure. */
2878 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
2879
2880 ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids,
2881 0x20 * 4);
2882 bcode = mbuf + (pcihdr % 4);
2883
2884 /* Validate signature of PCI data structure. */
2885 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
2886 bcode[0x2] != 'I' || bcode[0x3] != 'R') {
2887 /* Incorrect header. */
2888 ql_log(ql_log_fatal, vha, 0x0155,
2889 "PCI data struct not found pcir_adr=%x.\n", pcids);
2890 ret = QLA_FUNCTION_FAILED;
2891 break;
2892 }
2893
2894 /* Read version */
2895 code_type = bcode[0x14];
2896 switch (code_type) {
2897 case ROM_CODE_TYPE_BIOS:
2898 /* Intel x86, PC-AT compatible. */
2899 ha->bios_revision[0] = bcode[0x12];
2900 ha->bios_revision[1] = bcode[0x13];
2901 ql_dbg(ql_dbg_init, vha, 0x0156,
2902 "Read BIOS %d.%d.\n",
2903 ha->bios_revision[1], ha->bios_revision[0]);
2904 break;
2905 case ROM_CODE_TYPE_FCODE:
2906 /* Open Firmware standard for PCI (FCode). */
2907 ha->fcode_revision[0] = bcode[0x12];
2908 ha->fcode_revision[1] = bcode[0x13];
2909 ql_dbg(ql_dbg_init, vha, 0x0157,
2910 "Read FCODE %d.%d.\n",
2911 ha->fcode_revision[1], ha->fcode_revision[0]);
2912 break;
2913 case ROM_CODE_TYPE_EFI:
2914 /* Extensible Firmware Interface (EFI). */
2915 ha->efi_revision[0] = bcode[0x12];
2916 ha->efi_revision[1] = bcode[0x13];
2917 ql_dbg(ql_dbg_init, vha, 0x0158,
2918 "Read EFI %d.%d.\n",
2919 ha->efi_revision[1], ha->efi_revision[0]);
2920 break;
2921 default:
2922 ql_log(ql_log_warn, vha, 0x0159,
2923 "Unrecognized code type %x at pcids %x.\n",
2924 code_type, pcids);
2925 break;
2926 }
2927
2928 last_image = bcode[0x15] & BIT_7;
2929
2930 /* Locate next PCI expansion ROM. */
2931 pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
2932 } while (!last_image);
2933
2934 /* Read firmware image information. */
2935 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
2936 dcode = mbuf;
2937 ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2,
2938 0x20);
2939 bcode = mbuf + (pcihdr % 4);
2940
2941 /* Validate signature of PCI data structure. */
2942 if (bcode[0x0] == 0x3 && bcode[0x1] == 0x0 &&
2943 bcode[0x2] == 0x40 && bcode[0x3] == 0x40) {
2944 ha->fw_revision[0] = bcode[0x4];
2945 ha->fw_revision[1] = bcode[0x5];
2946 ha->fw_revision[2] = bcode[0x6];
2947 ql_dbg(ql_dbg_init, vha, 0x0153,
2948 "Firmware revision %d.%d.%d\n",
2949 ha->fw_revision[0], ha->fw_revision[1],
2950 ha->fw_revision[2]);
2951 }
2952
2953 return ret;
2954}
2955
2956int
2825qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) 2957qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2826{ 2958{
2827 int ret = QLA_SUCCESS; 2959 int ret = QLA_SUCCESS;
@@ -2832,7 +2964,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2832 int i; 2964 int i;
2833 struct qla_hw_data *ha = vha->hw; 2965 struct qla_hw_data *ha = vha->hw;
2834 2966
2835 if (IS_QLA82XX(ha)) 2967 if (IS_P3P_TYPE(ha))
2836 return ret; 2968 return ret;
2837 2969
2838 if (!mbuf) 2970 if (!mbuf)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 83a8f7a9ec76..ff12d4677cc4 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -430,13 +430,8 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
430 } 430 }
431 431
432 ql_dbg(ql_dbg_tgt, vha, 0xe047, 432 ql_dbg(ql_dbg_tgt, vha, 0xe047,
433 "scsi(%ld): resetting (session %p from port " 433 "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
434 "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, " 434 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
435 "mcmd %x, loop_id %d)\n", vha->host_no, sess,
436 sess->port_name[0], sess->port_name[1],
437 sess->port_name[2], sess->port_name[3],
438 sess->port_name[4], sess->port_name[5],
439 sess->port_name[6], sess->port_name[7],
440 mcmd, loop_id); 435 mcmd, loop_id);
441 436
442 lun = a->u.isp24.fcp_cmnd.lun; 437 lun = a->u.isp24.fcp_cmnd.lun;
@@ -467,15 +462,10 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
467 sess->expires = jiffies + dev_loss_tmo * HZ; 462 sess->expires = jiffies + dev_loss_tmo * HZ;
468 463
469 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, 464 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
470 "qla_target(%d): session for port %02x:%02x:%02x:" 465 "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
471 "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
472 "deletion in %u secs (expires: %lu) immed: %d\n", 466 "deletion in %u secs (expires: %lu) immed: %d\n",
473 sess->vha->vp_idx, 467 sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
474 sess->port_name[0], sess->port_name[1], 468 sess->expires, immediate);
475 sess->port_name[2], sess->port_name[3],
476 sess->port_name[4], sess->port_name[5],
477 sess->port_name[6], sess->port_name[7],
478 sess->loop_id, dev_loss_tmo, sess->expires, immediate);
479 469
480 if (immediate) 470 if (immediate)
481 schedule_delayed_work(&tgt->sess_del_work, 0); 471 schedule_delayed_work(&tgt->sess_del_work, 0);
@@ -630,13 +620,9 @@ static struct qla_tgt_sess *qlt_create_sess(
630 sess = kzalloc(sizeof(*sess), GFP_KERNEL); 620 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
631 if (!sess) { 621 if (!sess) {
632 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a, 622 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
633 "qla_target(%u): session allocation failed, " 623 "qla_target(%u): session allocation failed, all commands "
634 "all commands from port %02x:%02x:%02x:%02x:" 624 "from port %8phC will be refused", vha->vp_idx,
635 "%02x:%02x:%02x:%02x will be refused", vha->vp_idx, 625 fcport->port_name);
636 fcport->port_name[0], fcport->port_name[1],
637 fcport->port_name[2], fcport->port_name[3],
638 fcport->port_name[4], fcport->port_name[5],
639 fcport->port_name[6], fcport->port_name[7]);
640 626
641 return NULL; 627 return NULL;
642 } 628 }
@@ -680,15 +666,11 @@ static struct qla_tgt_sess *qlt_create_sess(
680 spin_unlock_irqrestore(&ha->hardware_lock, flags); 666 spin_unlock_irqrestore(&ha->hardware_lock, flags);
681 667
682 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, 668 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
683 "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:" 669 "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
684 "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed" 670 "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
685 " completion %ssupported) added\n", 671 vha->vp_idx, local ? "local " : "", fcport->port_name,
686 vha->vp_idx, local ? "local " : "", fcport->port_name[0], 672 fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
687 fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], 673 sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
688 fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
689 fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
690 sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
691 "" : "not ");
692 674
693 return sess; 675 return sess;
694} 676}
@@ -730,13 +712,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
730 qlt_undelete_sess(sess); 712 qlt_undelete_sess(sess);
731 713
732 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, 714 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
733 "qla_target(%u): %ssession for port %02x:" 715 "qla_target(%u): %ssession for port %8phC "
734 "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) " 716 "(loop ID %d) reappeared\n", vha->vp_idx,
735 "reappeared\n", vha->vp_idx, sess->local ? "local " 717 sess->local ? "local " : "", sess->port_name,
736 : "", sess->port_name[0], sess->port_name[1],
737 sess->port_name[2], sess->port_name[3],
738 sess->port_name[4], sess->port_name[5],
739 sess->port_name[6], sess->port_name[7],
740 sess->loop_id); 718 sess->loop_id);
741 719
742 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 720 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
@@ -749,13 +727,8 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
749 if (sess && sess->local) { 727 if (sess && sess->local) {
750 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, 728 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
751 "qla_target(%u): local session for " 729 "qla_target(%u): local session for "
752 "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 730 "port %8phC (loop ID %d) became global\n", vha->vp_idx,
753 "(loop ID %d) became global\n", vha->vp_idx, 731 fcport->port_name, sess->loop_id);
754 fcport->port_name[0], fcport->port_name[1],
755 fcport->port_name[2], fcport->port_name[3],
756 fcport->port_name[4], fcport->port_name[5],
757 fcport->port_name[6], fcport->port_name[7],
758 sess->loop_id);
759 sess->local = 0; 732 sess->local = 0;
760 } 733 }
761 ha->tgt.tgt_ops->put_sess(sess); 734 ha->tgt.tgt_ops->put_sess(sess);
@@ -2840,10 +2813,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
2840 int res = 0; 2813 int res = 0;
2841 2814
2842 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 2815 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
2843 "qla_target(%d): Port ID: 0x%02x:%02x:%02x" 2816 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
2844 " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0], 2817 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
2845 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
2846 iocb->u.isp24.status_subcode);
2847 2818
2848 switch (iocb->u.isp24.status_subcode) { 2819 switch (iocb->u.isp24.status_subcode) {
2849 case ELS_PLOGI: 2820 case ELS_PLOGI:
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 6c66d22eb1b1..a808e293dae0 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.05.00.03-k" 10#define QLA2XXX_VERSION "8.06.00.08-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 5 13#define QLA_DRIVER_MINOR_VER 6
14#define QLA_DRIVER_PATCH_VER 0 14#define QLA_DRIVER_PATCH_VER 0
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index a318092e033f..a6da313e253b 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1474,15 +1474,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
1474 1474
1475 1475
1476 if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24) 1476 if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
1477 pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", 1477 pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
1478 sess, 1478 sess, sess->port_name,
1479 sess->port_name[0], sess->port_name[1], 1479 sess->loop_id, loop_id, sess->s_id.b.domain,
1480 sess->port_name[2], sess->port_name[3], 1480 sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
1481 sess->port_name[4], sess->port_name[5], 1481 s_id.b.area, s_id.b.al_pa);
1482 sess->port_name[6], sess->port_name[7],
1483 sess->loop_id, loop_id,
1484 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
1485 s_id.b.domain, s_id.b.area, s_id.b.al_pa);
1486 1482
1487 if (sess->loop_id != loop_id) { 1483 if (sess->loop_id != loop_id) {
1488 /* 1484 /*
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index d607eb8e24cb..8196c2f7915c 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -259,8 +259,8 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
259 * Return: On success return QLA_SUCCESS 259 * Return: On success return QLA_SUCCESS
260 * On error return QLA_ERROR 260 * On error return QLA_ERROR
261 **/ 261 **/
262static int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, 262int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
263 uint32_t *data, uint32_t count) 263 uint32_t *data, uint32_t count)
264{ 264{
265 int i, j; 265 int i, j;
266 uint32_t agt_ctrl; 266 uint32_t agt_ctrl;
@@ -1473,9 +1473,9 @@ int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
1473 __func__)); 1473 __func__));
1474 } 1474 }
1475 1475
1476 /* For ISP8324, Reset owner is NIC, iSCSI or FCOE based on priority 1476 /* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on
1477 * and which drivers are present. Unlike ISP8022, the function setting 1477 * priority and which drivers are present. Unlike ISP8022, the function
1478 * NEED_RESET, may not be the Reset owner. */ 1478 * setting NEED_RESET, may not be the Reset owner. */
1479 if (qla4_83xx_can_perform_reset(ha)) 1479 if (qla4_83xx_can_perform_reset(ha))
1480 set_bit(AF_8XXX_RST_OWNER, &ha->flags); 1480 set_bit(AF_8XXX_RST_OWNER, &ha->flags);
1481 1481
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
index fab237fa32cc..a0de6e25ea5a 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.h
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -290,4 +290,38 @@ struct qla4_83xx_idc_information {
290 uint32_t info3; /* IDC additional info */ 290 uint32_t info3; /* IDC additional info */
291}; 291};
292 292
293#define QLA83XX_PEX_DMA_ENGINE_INDEX 8
294#define QLA83XX_PEX_DMA_BASE_ADDRESS 0x77320000
295#define QLA83XX_PEX_DMA_NUM_OFFSET 0x10000
296#define QLA83XX_PEX_DMA_CMD_ADDR_LOW 0x0
297#define QLA83XX_PEX_DMA_CMD_ADDR_HIGH 0x04
298#define QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL 0x08
299
300#define QLA83XX_PEX_DMA_READ_SIZE (16 * 1024)
301#define QLA83XX_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */
302
303/* Read Memory: For Pex-DMA */
304struct qla4_83xx_minidump_entry_rdmem_pex_dma {
305 struct qla8xxx_minidump_entry_hdr h;
306 uint32_t desc_card_addr;
307 uint16_t dma_desc_cmd;
308 uint8_t rsvd[2];
309 uint32_t start_dma_cmd;
310 uint8_t rsvd2[12];
311 uint32_t read_addr;
312 uint32_t read_data_size;
313};
314
315struct qla4_83xx_pex_dma_descriptor {
316 struct {
317 uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
318 uint8_t rsvd[2];
319 uint16_t dma_desc_cmd;
320 } cmd;
321 uint64_t src_addr;
322 uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func,
323 * 8-15: desc-cmd */
324 uint8_t rsvd[24];
325} __packed;
326
293#endif 327#endif
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 19ee55a6226c..463239c972b0 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -83,7 +83,7 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
83 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, 83 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
84 QLA8XXX_DEV_NEED_RESET); 84 QLA8XXX_DEV_NEED_RESET);
85 if (is_qla8022(ha) || 85 if (is_qla8022(ha) ||
86 (is_qla8032(ha) && 86 ((is_qla8032(ha) || is_qla8042(ha)) &&
87 qla4_83xx_can_perform_reset(ha))) { 87 qla4_83xx_can_perform_reset(ha))) {
88 set_bit(AF_8XXX_RST_OWNER, &ha->flags); 88 set_bit(AF_8XXX_RST_OWNER, &ha->flags);
89 set_bit(AF_FW_RECOVERY, &ha->flags); 89 set_bit(AF_FW_RECOVERY, &ha->flags);
@@ -158,14 +158,12 @@ qla4xxx_fw_version_show(struct device *dev,
158 158
159 if (is_qla80XX(ha)) 159 if (is_qla80XX(ha))
160 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", 160 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
161 ha->firmware_version[0], 161 ha->fw_info.fw_major, ha->fw_info.fw_minor,
162 ha->firmware_version[1], 162 ha->fw_info.fw_patch, ha->fw_info.fw_build);
163 ha->patch_number, ha->build_number);
164 else 163 else
165 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n", 164 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
166 ha->firmware_version[0], 165 ha->fw_info.fw_major, ha->fw_info.fw_minor,
167 ha->firmware_version[1], 166 ha->fw_info.fw_patch, ha->fw_info.fw_build);
168 ha->patch_number, ha->build_number);
169} 167}
170 168
171static ssize_t 169static ssize_t
@@ -181,8 +179,8 @@ qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
181 char *buf) 179 char *buf)
182{ 180{
183 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); 181 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
184 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major, 182 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major,
185 ha->iscsi_minor); 183 ha->fw_info.iscsi_minor);
186} 184}
187 185
188static ssize_t 186static ssize_t
@@ -191,8 +189,8 @@ qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
191{ 189{
192 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); 190 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
193 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n", 191 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
194 ha->bootload_major, ha->bootload_minor, 192 ha->fw_info.bootload_major, ha->fw_info.bootload_minor,
195 ha->bootload_patch, ha->bootload_build); 193 ha->fw_info.bootload_patch, ha->fw_info.bootload_build);
196} 194}
197 195
198static ssize_t 196static ssize_t
@@ -259,6 +257,63 @@ qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr,
259 return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name); 257 return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
260} 258}
261 259
260static ssize_t
261qla4xxx_fw_timestamp_show(struct device *dev, struct device_attribute *attr,
262 char *buf)
263{
264 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
265 return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date,
266 ha->fw_info.fw_build_time);
267}
268
269static ssize_t
270qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr,
271 char *buf)
272{
273 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
274 return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user);
275}
276
277static ssize_t
278qla4xxx_fw_ext_timestamp_show(struct device *dev, struct device_attribute *attr,
279 char *buf)
280{
281 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
282 return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.extended_timestamp);
283}
284
285static ssize_t
286qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr,
287 char *buf)
288{
289 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
290 char *load_src = NULL;
291
292 switch (ha->fw_info.fw_load_source) {
293 case 1:
294 load_src = "Flash Primary";
295 break;
296 case 2:
297 load_src = "Flash Secondary";
298 break;
299 case 3:
300 load_src = "Host Download";
301 break;
302 }
303
304 return snprintf(buf, PAGE_SIZE, "%s\n", load_src);
305}
306
307static ssize_t
308qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr,
309 char *buf)
310{
311 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
312 qla4xxx_about_firmware(ha);
313 return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs,
314 ha->fw_uptime_msecs);
315}
316
262static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL); 317static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
263static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL); 318static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
264static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL); 319static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
@@ -269,6 +324,12 @@ static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL);
269static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL); 324static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL);
270static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL); 325static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL);
271static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL); 326static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL);
327static DEVICE_ATTR(fw_timestamp, S_IRUGO, qla4xxx_fw_timestamp_show, NULL);
328static DEVICE_ATTR(fw_build_user, S_IRUGO, qla4xxx_fw_build_user_show, NULL);
329static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show,
330 NULL);
331static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL);
332static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL);
272 333
273struct device_attribute *qla4xxx_host_attrs[] = { 334struct device_attribute *qla4xxx_host_attrs[] = {
274 &dev_attr_fw_version, 335 &dev_attr_fw_version,
@@ -281,5 +342,10 @@ struct device_attribute *qla4xxx_host_attrs[] = {
281 &dev_attr_phy_port_num, 342 &dev_attr_phy_port_num,
282 &dev_attr_iscsi_func_cnt, 343 &dev_attr_iscsi_func_cnt,
283 &dev_attr_hba_model, 344 &dev_attr_hba_model,
345 &dev_attr_fw_timestamp,
346 &dev_attr_fw_build_user,
347 &dev_attr_fw_ext_timestamp,
348 &dev_attr_fw_load_src,
349 &dev_attr_fw_uptime,
284 NULL, 350 NULL,
285}; 351};
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
index 8acdc582ff6d..cf8fdf1d1257 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.c
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2011 QLogic Corporation 3 * Copyright (c) 2011-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 77b7c594010f..5649e9ef59a8 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -141,21 +141,22 @@ void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha)
141 141
142 if (is_qla8022(ha)) { 142 if (is_qla8022(ha)) {
143 ql4_printk(KERN_INFO, ha, 143 ql4_printk(KERN_INFO, ha,
144 "scsi(%ld): %s, ISP8022 Dumping hw/fw registers:\n" 144 "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
145 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" 145 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
146 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" 146 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
147 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" 147 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
148 " PEG_NET_4_PC: 0x%x\n", ha->host_no, 148 " PEG_NET_4_PC: 0x%x\n", ha->host_no, __func__,
149 __func__, halt_status1, halt_status2, 149 ha->pdev->device, halt_status1, halt_status2,
150 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c), 150 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c),
151 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c), 151 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c),
152 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c), 152 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c),
153 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c), 153 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c),
154 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c)); 154 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c));
155 } else if (is_qla8032(ha)) { 155 } else if (is_qla8032(ha) || is_qla8042(ha)) {
156 ql4_printk(KERN_INFO, ha, 156 ql4_printk(KERN_INFO, ha,
157 "scsi(%ld): %s, ISP8324 Dumping hw/fw registers:\n" 157 "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
158 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n", 158 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n",
159 ha->host_no, __func__, halt_status1, halt_status2); 159 ha->host_no, __func__, ha->pdev->device,
160 halt_status1, halt_status2);
160 } 161 }
161} 162}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ddf16a86bbf5..41327d46ecf5 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -64,6 +64,10 @@
64#define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032 64#define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032
65#endif 65#endif
66 66
67#ifndef PCI_DEVICE_ID_QLOGIC_ISP8042
68#define PCI_DEVICE_ID_QLOGIC_ISP8042 0x8042
69#endif
70
67#define ISP4XXX_PCI_FN_1 0x1 71#define ISP4XXX_PCI_FN_1 0x1
68#define ISP4XXX_PCI_FN_2 0x3 72#define ISP4XXX_PCI_FN_2 0x3
69 73
@@ -201,6 +205,7 @@
201 205
202#define MAX_RESET_HA_RETRIES 2 206#define MAX_RESET_HA_RETRIES 2
203#define FW_ALIVE_WAIT_TOV 3 207#define FW_ALIVE_WAIT_TOV 3
208#define IDC_EXTEND_TOV 8
204 209
205#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) 210#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
206 211
@@ -335,6 +340,7 @@ struct ql4_tuple_ddb {
335#define DF_BOOT_TGT 1 /* Boot target entry */ 340#define DF_BOOT_TGT 1 /* Boot target entry */
336#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ 341#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
337#define DF_FO_MASKED 3 342#define DF_FO_MASKED 3
343#define DF_DISABLE_RELOGIN 4 /* Disable relogin to device */
338 344
339enum qla4_work_type { 345enum qla4_work_type {
340 QLA4_EVENT_AEN, 346 QLA4_EVENT_AEN,
@@ -557,6 +563,7 @@ struct scsi_qla_host {
557#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/ 563#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/
558#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/ 564#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/
559#define DPC_POST_IDC_ACK 23 /* 0x00200000 */ 565#define DPC_POST_IDC_ACK 23 /* 0x00200000 */
566#define DPC_RESTORE_ACB 24 /* 0x01000000 */
560 567
561 struct Scsi_Host *host; /* pointer to host data */ 568 struct Scsi_Host *host; /* pointer to host data */
562 uint32_t tot_ddbs; 569 uint32_t tot_ddbs;
@@ -734,12 +741,9 @@ struct scsi_qla_host {
734 struct iscsi_iface *iface_ipv6_1; 741 struct iscsi_iface *iface_ipv6_1;
735 742
736 /* --- From About Firmware --- */ 743 /* --- From About Firmware --- */
737 uint16_t iscsi_major; 744 struct about_fw_info fw_info;
738 uint16_t iscsi_minor; 745 uint32_t fw_uptime_secs; /* seconds elapsed since fw bootup */
739 uint16_t bootload_major; 746 uint32_t fw_uptime_msecs; /* milliseconds beyond elapsed seconds */
740 uint16_t bootload_minor;
741 uint16_t bootload_patch;
742 uint16_t bootload_build;
743 uint16_t def_timeout; /* Default login timeout */ 747 uint16_t def_timeout; /* Default login timeout */
744 748
745 uint32_t flash_state; 749 uint32_t flash_state;
@@ -780,9 +784,11 @@ struct scsi_qla_host {
780 uint32_t *reg_tbl; 784 uint32_t *reg_tbl;
781 struct qla4_83xx_reset_template reset_tmplt; 785 struct qla4_83xx_reset_template reset_tmplt;
782 struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address 786 struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address
783 for ISP8324 */ 787 for ISP8324 and
788 and ISP8042 */
784 uint32_t pf_bit; 789 uint32_t pf_bit;
785 struct qla4_83xx_idc_information idc_info; 790 struct qla4_83xx_idc_information idc_info;
791 struct addr_ctrl_blk *saved_acb;
786}; 792};
787 793
788struct ql4_task_data { 794struct ql4_task_data {
@@ -850,9 +856,14 @@ static inline int is_qla8032(struct scsi_qla_host *ha)
850 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324; 856 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324;
851} 857}
852 858
859static inline int is_qla8042(struct scsi_qla_host *ha)
860{
861 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042;
862}
863
853static inline int is_qla80XX(struct scsi_qla_host *ha) 864static inline int is_qla80XX(struct scsi_qla_host *ha)
854{ 865{
855 return is_qla8022(ha) || is_qla8032(ha); 866 return is_qla8022(ha) || is_qla8032(ha) || is_qla8042(ha);
856} 867}
857 868
858static inline int is_aer_supported(struct scsi_qla_host *ha) 869static inline int is_aer_supported(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index c7b8892b5a83..51d1a70f8b45 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -458,6 +458,7 @@ struct qla_flt_region {
458#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077 458#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
459 459
460#define MBOX_CMD_IDC_ACK 0x0101 460#define MBOX_CMD_IDC_ACK 0x0101
461#define MBOX_CMD_IDC_TIME_EXTEND 0x0102
461#define MBOX_CMD_PORT_RESET 0x0120 462#define MBOX_CMD_PORT_RESET 0x0120
462#define MBOX_CMD_SET_PORT_CONFIG 0x0122 463#define MBOX_CMD_SET_PORT_CONFIG 0x0122
463 464
@@ -502,6 +503,7 @@ struct qla_flt_region {
502#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036 503#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036
503#define MBOX_ASTS_IDC_COMPLETE 0x8100 504#define MBOX_ASTS_IDC_COMPLETE 0x8100
504#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101 505#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
506#define MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION 0x8102
505#define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110 507#define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110
506#define MBOX_ASTS_TXSCVR_INSERTED 0x8130 508#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
507#define MBOX_ASTS_TXSCVR_REMOVED 0x8131 509#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
@@ -512,6 +514,10 @@ struct qla_flt_region {
512#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022 514#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022
513#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027 515#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
514 516
517/* ACB Configuration Defines */
518#define ACB_CONFIG_DISABLE 0x00
519#define ACB_CONFIG_SET 0x01
520
515/* ACB State Defines */ 521/* ACB State Defines */
516#define ACB_STATE_UNCONFIGURED 0x00 522#define ACB_STATE_UNCONFIGURED 0x00
517#define ACB_STATE_INVALID 0x01 523#define ACB_STATE_INVALID 0x01
@@ -955,7 +961,7 @@ struct about_fw_info {
955 uint16_t bootload_minor; /* 46 - 47 */ 961 uint16_t bootload_minor; /* 46 - 47 */
956 uint16_t bootload_patch; /* 48 - 49 */ 962 uint16_t bootload_patch; /* 48 - 49 */
957 uint16_t bootload_build; /* 4A - 4B */ 963 uint16_t bootload_build; /* 4A - 4B */
958 uint8_t reserved2[180]; /* 4C - FF */ 964 uint8_t extended_timestamp[180];/* 4C - FF */
959}; 965};
960 966
961struct crash_record { 967struct crash_record {
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 4a428009f699..e6f2a2669dbd 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -266,6 +266,14 @@ int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
266 dma_addr_t dma_addr); 266 dma_addr_t dma_addr);
267int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, 267int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
268 char *password, uint16_t chap_index); 268 char *password, uint16_t chap_index);
269int qla4xxx_disable_acb(struct scsi_qla_host *ha);
270int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
271 uint32_t *mbox_sts, dma_addr_t acb_dma);
272int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
273 uint32_t acb_type, uint32_t len);
274int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
275int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
276 uint64_t addr, uint32_t *data, uint32_t count);
269 277
270extern int ql4xextended_error_logging; 278extern int ql4xextended_error_logging;
271extern int ql4xdontresethba; 279extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 8fc8548ba4ba..7456eeb2e58a 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -107,7 +107,7 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
107 (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in); 107 (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in);
108 writel(0, 108 writel(0,
109 (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out); 109 (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out);
110 } else if (is_qla8032(ha)) { 110 } else if (is_qla8032(ha) || is_qla8042(ha)) {
111 writel(0, 111 writel(0,
112 (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in); 112 (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in);
113 writel(0, 113 writel(0,
@@ -940,7 +940,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
940 * while switching from polling to interrupt mode. IOCB interrupts are 940 * while switching from polling to interrupt mode. IOCB interrupts are
941 * enabled via isp_ops->enable_intrs. 941 * enabled via isp_ops->enable_intrs.
942 */ 942 */
943 if (is_qla8032(ha)) 943 if (is_qla8032(ha) || is_qla8042(ha))
944 qla4_83xx_enable_mbox_intrs(ha); 944 qla4_83xx_enable_mbox_intrs(ha);
945 945
946 if (qla4xxx_about_firmware(ha) == QLA_ERROR) 946 if (qla4xxx_about_firmware(ha) == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 6f4decd44c6a..8503ad643bdd 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index fad71ed067ec..e5697ab144d2 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 482287f4005f..7dff09f09b71 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -588,7 +588,7 @@ static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
588{ 588{
589 int rval = 1; 589 int rval = 1;
590 590
591 if (is_qla8032(ha)) { 591 if (is_qla8032(ha) || is_qla8042(ha)) {
592 if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) || 592 if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
593 (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) { 593 (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
594 DEBUG2(ql4_printk(KERN_INFO, ha, 594 DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -621,7 +621,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
621 uint32_t mbox_sts[MBOX_AEN_REG_COUNT]; 621 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
622 __le32 __iomem *mailbox_out; 622 __le32 __iomem *mailbox_out;
623 623
624 if (is_qla8032(ha)) 624 if (is_qla8032(ha) || is_qla8042(ha))
625 mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0]; 625 mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
626 else if (is_qla8022(ha)) 626 else if (is_qla8022(ha))
627 mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0]; 627 mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
@@ -665,7 +665,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
665 qla4xxx_dump_registers(ha); 665 qla4xxx_dump_registers(ha);
666 666
667 if ((is_qla8022(ha) && ql4xdontresethba) || 667 if ((is_qla8022(ha) && ql4xdontresethba) ||
668 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { 668 ((is_qla8032(ha) || is_qla8042(ha)) &&
669 qla4_83xx_idc_dontreset(ha))) {
669 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n", 670 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
670 ha->host_no, __func__)); 671 ha->host_no, __func__));
671 } else { 672 } else {
@@ -744,17 +745,23 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
744 * mbox_sts[3] = new ACB state */ 745 * mbox_sts[3] = new ACB state */
745 if ((mbox_sts[3] == ACB_STATE_VALID) && 746 if ((mbox_sts[3] == ACB_STATE_VALID) &&
746 ((mbox_sts[2] == ACB_STATE_TENTATIVE) || 747 ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
747 (mbox_sts[2] == ACB_STATE_ACQUIRING))) 748 (mbox_sts[2] == ACB_STATE_ACQUIRING))) {
748 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); 749 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
749 else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && 750 } else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
750 (mbox_sts[2] == ACB_STATE_VALID)) { 751 (mbox_sts[2] == ACB_STATE_VALID)) {
751 if (is_qla80XX(ha)) 752 if (is_qla80XX(ha))
752 set_bit(DPC_RESET_HA_FW_CONTEXT, 753 set_bit(DPC_RESET_HA_FW_CONTEXT,
753 &ha->dpc_flags); 754 &ha->dpc_flags);
754 else 755 else
755 set_bit(DPC_RESET_HA, &ha->dpc_flags); 756 set_bit(DPC_RESET_HA, &ha->dpc_flags);
756 } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) 757 } else if (mbox_sts[3] == ACB_STATE_DISABLING) {
758 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
759 ha->host_no, __func__);
760 } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) {
757 complete(&ha->disable_acb_comp); 761 complete(&ha->disable_acb_comp);
762 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
763 ha->host_no, __func__);
764 }
758 break; 765 break;
759 766
760 case MBOX_ASTS_MAC_ADDRESS_CHANGED: 767 case MBOX_ASTS_MAC_ADDRESS_CHANGED:
@@ -836,7 +843,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
836 case MBOX_ASTS_IDC_REQUEST_NOTIFICATION: 843 case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
837 { 844 {
838 uint32_t opcode; 845 uint32_t opcode;
839 if (is_qla8032(ha)) { 846 if (is_qla8032(ha) || is_qla8042(ha)) {
840 DEBUG2(ql4_printk(KERN_INFO, ha, 847 DEBUG2(ql4_printk(KERN_INFO, ha,
841 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", 848 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
842 ha->host_no, mbox_sts[0], 849 ha->host_no, mbox_sts[0],
@@ -858,7 +865,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
858 } 865 }
859 866
860 case MBOX_ASTS_IDC_COMPLETE: 867 case MBOX_ASTS_IDC_COMPLETE:
861 if (is_qla8032(ha)) { 868 if (is_qla8032(ha) || is_qla8042(ha)) {
862 DEBUG2(ql4_printk(KERN_INFO, ha, 869 DEBUG2(ql4_printk(KERN_INFO, ha,
863 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", 870 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
864 ha->host_no, mbox_sts[0], 871 ha->host_no, mbox_sts[0],
@@ -868,10 +875,15 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
868 "scsi:%ld: AEN %04x IDC Complete notification\n", 875 "scsi:%ld: AEN %04x IDC Complete notification\n",
869 ha->host_no, mbox_sts[0])); 876 ha->host_no, mbox_sts[0]));
870 877
871 if (qla4_83xx_loopback_in_progress(ha)) 878 if (qla4_83xx_loopback_in_progress(ha)) {
872 set_bit(AF_LOOPBACK, &ha->flags); 879 set_bit(AF_LOOPBACK, &ha->flags);
873 else 880 } else {
874 clear_bit(AF_LOOPBACK, &ha->flags); 881 clear_bit(AF_LOOPBACK, &ha->flags);
882 if (ha->saved_acb)
883 set_bit(DPC_RESTORE_ACB,
884 &ha->dpc_flags);
885 }
886 qla4xxx_wake_dpc(ha);
875 } 887 }
876 break; 888 break;
877 889
@@ -886,6 +898,17 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
886 ha->host_no, mbox_sts[0])); 898 ha->host_no, mbox_sts[0]));
887 break; 899 break;
888 900
901 case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
902 DEBUG2(ql4_printk(KERN_INFO, ha,
903 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
904 ha->host_no, mbox_sts[0], mbox_sts[1],
905 mbox_sts[2], mbox_sts[3], mbox_sts[4],
906 mbox_sts[5]));
907 DEBUG2(ql4_printk(KERN_INFO, ha,
908 "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n",
909 ha->host_no, mbox_sts[0]));
910 break;
911
889 case MBOX_ASTS_INITIALIZATION_FAILED: 912 case MBOX_ASTS_INITIALIZATION_FAILED:
890 DEBUG2(ql4_printk(KERN_INFO, ha, 913 DEBUG2(ql4_printk(KERN_INFO, ha,
891 "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n", 914 "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
@@ -1297,7 +1320,7 @@ qla4_8xxx_default_intr_handler(int irq, void *dev_id)
1297 uint32_t intr_status; 1320 uint32_t intr_status;
1298 uint8_t reqs_count = 0; 1321 uint8_t reqs_count = 0;
1299 1322
1300 if (is_qla8032(ha)) { 1323 if (is_qla8032(ha) || is_qla8042(ha)) {
1301 qla4_83xx_mailbox_intr_handler(irq, dev_id); 1324 qla4_83xx_mailbox_intr_handler(irq, dev_id);
1302 } else { 1325 } else {
1303 spin_lock_irqsave(&ha->hardware_lock, flags); 1326 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1334,7 +1357,7 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
1334 uint32_t ival = 0; 1357 uint32_t ival = 0;
1335 1358
1336 spin_lock_irqsave(&ha->hardware_lock, flags); 1359 spin_lock_irqsave(&ha->hardware_lock, flags);
1337 if (is_qla8032(ha)) { 1360 if (is_qla8032(ha) || is_qla8042(ha)) {
1338 ival = readl(&ha->qla4_83xx_reg->iocb_int_mask); 1361 ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
1339 if (ival == 0) { 1362 if (ival == 0) {
1340 ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n", 1363 ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
@@ -1425,10 +1448,10 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1425 goto try_intx; 1448 goto try_intx;
1426 1449
1427 if (ql4xenablemsix == 2) { 1450 if (ql4xenablemsix == 2) {
1428 /* Note: MSI Interrupts not supported for ISP8324 */ 1451 /* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */
1429 if (is_qla8032(ha)) { 1452 if (is_qla8032(ha) || is_qla8042(ha)) {
1430 ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP8324, Falling back-to INTx mode\n", 1453 ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP%04x, Falling back-to INTx mode\n",
1431 __func__); 1454 __func__, ha->pdev->device);
1432 goto try_intx; 1455 goto try_intx;
1433 } 1456 }
1434 goto try_msi; 1457 goto try_msi;
@@ -1444,9 +1467,9 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1444 "MSI-X: Enabled (0x%X).\n", ha->revision_id)); 1467 "MSI-X: Enabled (0x%X).\n", ha->revision_id));
1445 goto irq_attached; 1468 goto irq_attached;
1446 } else { 1469 } else {
1447 if (is_qla8032(ha)) { 1470 if (is_qla8032(ha) || is_qla8042(ha)) {
1448 ql4_printk(KERN_INFO, ha, "%s: ISP8324: MSI-X: Falling back-to INTx mode. ret = %d\n", 1471 ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back-to INTx mode. ret = %d\n",
1449 __func__, ret); 1472 __func__, ha->pdev->device, ret);
1450 goto try_intx; 1473 goto try_intx;
1451 } 1474 }
1452 } 1475 }
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index a501beab3ffe..62d4208af21f 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#include <linux/ctype.h>
8#include "ql4_def.h" 9#include "ql4_def.h"
9#include "ql4_glbl.h" 10#include "ql4_glbl.h"
10#include "ql4_dbg.h" 11#include "ql4_dbg.h"
@@ -52,7 +53,7 @@ static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
52{ 53{
53 int rval = 1; 54 int rval = 1;
54 55
55 if (is_qla8032(ha)) { 56 if (is_qla8032(ha) || is_qla8042(ha)) {
56 if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && 57 if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
57 test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) 58 test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
58 rval = 0; 59 rval = 0;
@@ -223,7 +224,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
223 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 224 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
224 CRB_NIU_XG_PAUSE_CTL_P0 | 225 CRB_NIU_XG_PAUSE_CTL_P0 |
225 CRB_NIU_XG_PAUSE_CTL_P1); 226 CRB_NIU_XG_PAUSE_CTL_P1);
226 } else if (is_qla8032(ha)) { 227 } else if (is_qla8032(ha) || is_qla8042(ha)) {
227 ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n", 228 ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
228 __func__); 229 __func__);
229 qla4_83xx_disable_pause(ha); 230 qla4_83xx_disable_pause(ha);
@@ -1270,16 +1271,28 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
1270 } 1271 }
1271 1272
1272 /* Save version information. */ 1273 /* Save version information. */
1273 ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major); 1274 ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major);
1274 ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor); 1275 ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor);
1275 ha->patch_number = le16_to_cpu(about_fw->fw_patch); 1276 ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch);
1276 ha->build_number = le16_to_cpu(about_fw->fw_build); 1277 ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build);
1277 ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major); 1278 memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date,
1278 ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor); 1279 sizeof(about_fw->fw_build_date));
1279 ha->bootload_major = le16_to_cpu(about_fw->bootload_major); 1280 memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time,
1280 ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor); 1281 sizeof(about_fw->fw_build_time));
1281 ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch); 1282 strcpy((char *)ha->fw_info.fw_build_user,
1282 ha->bootload_build = le16_to_cpu(about_fw->bootload_build); 1283 skip_spaces((char *)about_fw->fw_build_user));
1284 ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source);
1285 ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major);
1286 ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
1287 ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major);
1288 ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor);
1289 ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch);
1290 ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build);
1291 strcpy((char *)ha->fw_info.extended_timestamp,
1292 skip_spaces((char *)about_fw->extended_timestamp));
1293
1294 ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]);
1295 ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]);
1283 status = QLA_SUCCESS; 1296 status = QLA_SUCCESS;
1284 1297
1285exit_about_fw: 1298exit_about_fw:
@@ -1723,6 +1736,45 @@ int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
1723 return status; 1736 return status;
1724} 1737}
1725 1738
1739/**
1740 * qla4_84xx_extend_idc_tmo - Extend IDC Timeout.
1741 * @ha: Pointer to host adapter structure.
1742 * @ext_tmo: idc timeout value
1743 *
1744 * Requests firmware to extend the idc timeout value.
1745 **/
1746static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo)
1747{
1748 uint32_t mbox_cmd[MBOX_REG_COUNT];
1749 uint32_t mbox_sts[MBOX_REG_COUNT];
1750 int status;
1751
1752 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1753 memset(&mbox_sts, 0, sizeof(mbox_sts));
1754 ext_tmo &= 0xf;
1755
1756 mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND;
1757 mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) |
1758 (ext_tmo << 8)); /* new timeout */
1759 mbox_cmd[2] = ha->idc_info.info1;
1760 mbox_cmd[3] = ha->idc_info.info2;
1761 mbox_cmd[4] = ha->idc_info.info3;
1762
1763 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
1764 mbox_cmd, mbox_sts);
1765 if (status != QLA_SUCCESS) {
1766 DEBUG2(ql4_printk(KERN_INFO, ha,
1767 "scsi%ld: %s: failed status %04X\n",
1768 ha->host_no, __func__, mbox_sts[0]));
1769 return QLA_ERROR;
1770 } else {
1771 ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n",
1772 __func__, ext_tmo);
1773 }
1774
1775 return QLA_SUCCESS;
1776}
1777
1726int qla4xxx_disable_acb(struct scsi_qla_host *ha) 1778int qla4xxx_disable_acb(struct scsi_qla_host *ha)
1727{ 1779{
1728 uint32_t mbox_cmd[MBOX_REG_COUNT]; 1780 uint32_t mbox_cmd[MBOX_REG_COUNT];
@@ -1739,6 +1791,23 @@ int qla4xxx_disable_acb(struct scsi_qla_host *ha)
1739 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB " 1791 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
1740 "failed w/ status %04X %04X %04X", __func__, 1792 "failed w/ status %04X %04X %04X", __func__,
1741 mbox_sts[0], mbox_sts[1], mbox_sts[2])); 1793 mbox_sts[0], mbox_sts[1], mbox_sts[2]));
1794 } else {
1795 if (is_qla8042(ha) &&
1796 (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
1797 /*
1798 * Disable ACB mailbox command takes time to complete
1799 * based on the total number of targets connected.
1800 * For 512 targets, it took approximately 5 secs to
1801 * complete. Setting the timeout value to 8, with the 3
1802 * secs buffer.
1803 */
1804 qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV);
1805 if (!wait_for_completion_timeout(&ha->disable_acb_comp,
1806 IDC_EXTEND_TOV * HZ)) {
1807 ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n",
1808 __func__);
1809 }
1810 }
1742 } 1811 }
1743 return status; 1812 return status;
1744} 1813}
@@ -2145,8 +2214,80 @@ int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
2145 ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, 2214 ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
2146 mbox_sts[0]); 2215 mbox_sts[0]);
2147 else 2216 else
2148 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", 2217 ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__);
2149 __func__));
2150 2218
2151 return status; 2219 return status;
2152} 2220}
2221
2222int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
2223{
2224 uint32_t mbox_cmd[MBOX_REG_COUNT];
2225 uint32_t mbox_sts[MBOX_REG_COUNT];
2226 struct addr_ctrl_blk *acb = NULL;
2227 uint32_t acb_len = sizeof(struct addr_ctrl_blk);
2228 int rval = QLA_SUCCESS;
2229 dma_addr_t acb_dma;
2230
2231 acb = dma_alloc_coherent(&ha->pdev->dev,
2232 sizeof(struct addr_ctrl_blk),
2233 &acb_dma, GFP_KERNEL);
2234 if (!acb) {
2235 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__);
2236 rval = QLA_ERROR;
2237 goto exit_config_acb;
2238 }
2239 memset(acb, 0, acb_len);
2240
2241 switch (acb_config) {
2242 case ACB_CONFIG_DISABLE:
2243 rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len);
2244 if (rval != QLA_SUCCESS)
2245 goto exit_free_acb;
2246
2247 rval = qla4xxx_disable_acb(ha);
2248 if (rval != QLA_SUCCESS)
2249 goto exit_free_acb;
2250
2251 if (!ha->saved_acb)
2252 ha->saved_acb = kzalloc(acb_len, GFP_KERNEL);
2253
2254 if (!ha->saved_acb) {
2255 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
2256 __func__);
2257 rval = QLA_ERROR;
2258 goto exit_config_acb;
2259 }
2260 memcpy(ha->saved_acb, acb, acb_len);
2261 break;
2262 case ACB_CONFIG_SET:
2263
2264 if (!ha->saved_acb) {
2265 ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n",
2266 __func__);
2267 rval = QLA_ERROR;
2268 goto exit_free_acb;
2269 }
2270
2271 memcpy(acb, ha->saved_acb, acb_len);
2272 kfree(ha->saved_acb);
2273 ha->saved_acb = NULL;
2274
2275 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
2276 if (rval != QLA_SUCCESS)
2277 goto exit_free_acb;
2278
2279 break;
2280 default:
2281 ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n",
2282 __func__);
2283 }
2284
2285exit_free_acb:
2286 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
2287 acb_dma);
2288exit_config_acb:
2289 DEBUG2(ql4_printk(KERN_INFO, ha,
2290 "%s %s\n", __func__,
2291 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
2292 return rval;
2293}
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 325db1f2c091..3bf418fbd432 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index dba0514d1c70..e97d79ff16f7 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index eaf00c162eb2..d001202d3565 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -1514,11 +1514,11 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
1514 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); 1514 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1515 1515
1516 /* 1516 /*
1517 * For ISP8324, drv_active register has 1 bit per function, 1517 * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
1518 * shift 1 by func_num to set a bit for the function. 1518 * shift 1 by func_num to set a bit for the function.
1519 * For ISP8022, drv_active has 4 bits per function 1519 * For ISP8022, drv_active has 4 bits per function
1520 */ 1520 */
1521 if (is_qla8032(ha)) 1521 if (is_qla8032(ha) || is_qla8042(ha))
1522 drv_active |= (1 << ha->func_num); 1522 drv_active |= (1 << ha->func_num);
1523 else 1523 else
1524 drv_active |= (1 << (ha->func_num * 4)); 1524 drv_active |= (1 << (ha->func_num * 4));
@@ -1536,11 +1536,11 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
1536 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); 1536 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1537 1537
1538 /* 1538 /*
1539 * For ISP8324, drv_active register has 1 bit per function, 1539 * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
1540 * shift 1 by func_num to set a bit for the function. 1540 * shift 1 by func_num to set a bit for the function.
1541 * For ISP8022, drv_active has 4 bits per function 1541 * For ISP8022, drv_active has 4 bits per function
1542 */ 1542 */
1543 if (is_qla8032(ha)) 1543 if (is_qla8032(ha) || is_qla8042(ha))
1544 drv_active &= ~(1 << (ha->func_num)); 1544 drv_active &= ~(1 << (ha->func_num));
1545 else 1545 else
1546 drv_active &= ~(1 << (ha->func_num * 4)); 1546 drv_active &= ~(1 << (ha->func_num * 4));
@@ -1559,11 +1559,11 @@ inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
1559 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); 1559 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1560 1560
1561 /* 1561 /*
1562 * For ISP8324, drv_active register has 1 bit per function, 1562 * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
1563 * shift 1 by func_num to set a bit for the function. 1563 * shift 1 by func_num to set a bit for the function.
1564 * For ISP8022, drv_active has 4 bits per function 1564 * For ISP8022, drv_active has 4 bits per function
1565 */ 1565 */
1566 if (is_qla8032(ha)) 1566 if (is_qla8032(ha) || is_qla8042(ha))
1567 rval = drv_state & (1 << ha->func_num); 1567 rval = drv_state & (1 << ha->func_num);
1568 else 1568 else
1569 rval = drv_state & (1 << (ha->func_num * 4)); 1569 rval = drv_state & (1 << (ha->func_num * 4));
@@ -1581,11 +1581,11 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
1581 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); 1581 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1582 1582
1583 /* 1583 /*
1584 * For ISP8324, drv_active register has 1 bit per function, 1584 * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
1585 * shift 1 by func_num to set a bit for the function. 1585 * shift 1 by func_num to set a bit for the function.
1586 * For ISP8022, drv_active has 4 bits per function 1586 * For ISP8022, drv_active has 4 bits per function
1587 */ 1587 */
1588 if (is_qla8032(ha)) 1588 if (is_qla8032(ha) || is_qla8042(ha))
1589 drv_state |= (1 << ha->func_num); 1589 drv_state |= (1 << ha->func_num);
1590 else 1590 else
1591 drv_state |= (1 << (ha->func_num * 4)); 1591 drv_state |= (1 << (ha->func_num * 4));
@@ -1602,11 +1602,11 @@ void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
1602 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); 1602 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1603 1603
1604 /* 1604 /*
1605 * For ISP8324, drv_active register has 1 bit per function, 1605 * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
1606 * shift 1 by func_num to set a bit for the function. 1606 * shift 1 by func_num to set a bit for the function.
1607 * For ISP8022, drv_active has 4 bits per function 1607 * For ISP8022, drv_active has 4 bits per function
1608 */ 1608 */
1609 if (is_qla8032(ha)) 1609 if (is_qla8032(ha) || is_qla8042(ha))
1610 drv_state &= ~(1 << ha->func_num); 1610 drv_state &= ~(1 << ha->func_num);
1611 else 1611 else
1612 drv_state &= ~(1 << (ha->func_num * 4)); 1612 drv_state &= ~(1 << (ha->func_num * 4));
@@ -1624,11 +1624,11 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
1624 qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); 1624 qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1625 1625
1626 /* 1626 /*
1627 * For ISP8324, drv_active register has 1 bit per function, 1627 * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
1628 * shift 1 by func_num to set a bit for the function. 1628 * shift 1 by func_num to set a bit for the function.
1629 * For ISP8022, drv_active has 4 bits per function. 1629 * For ISP8022, drv_active has 4 bits per function.
1630 */ 1630 */
1631 if (is_qla8032(ha)) 1631 if (is_qla8032(ha) || is_qla8042(ha))
1632 qsnt_state |= (1 << ha->func_num); 1632 qsnt_state |= (1 << ha->func_num);
1633 else 1633 else
1634 qsnt_state |= (2 << (ha->func_num * 4)); 1634 qsnt_state |= (2 << (ha->func_num * 4));
@@ -1737,6 +1737,208 @@ static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
1737 *d_ptr = data_ptr; 1737 *d_ptr = data_ptr;
1738} 1738}
1739 1739
1740static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha)
1741{
1742 int rval = QLA_SUCCESS;
1743 uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
1744 uint64_t dma_base_addr = 0;
1745 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
1746
1747 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
1748 ha->fw_dump_tmplt_hdr;
1749 dma_eng_num =
1750 tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
1751 dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
1752 (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
1753
1754 /* Read the pex-dma's command-status-and-control register. */
1755 rval = ha->isp_ops->rd_reg_indirect(ha,
1756 (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
1757 &cmd_sts_and_cntrl);
1758
1759 if (rval)
1760 return QLA_ERROR;
1761
1762 /* Check if requested pex-dma engine is available. */
1763 if (cmd_sts_and_cntrl & BIT_31)
1764 return QLA_SUCCESS;
1765 else
1766 return QLA_ERROR;
1767}
1768
1769static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha,
1770 struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr)
1771{
1772 int rval = QLA_SUCCESS, wait = 0;
1773 uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
1774 uint64_t dma_base_addr = 0;
1775 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
1776
1777 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
1778 ha->fw_dump_tmplt_hdr;
1779 dma_eng_num =
1780 tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
1781 dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
1782 (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
1783
1784 rval = ha->isp_ops->wr_reg_indirect(ha,
1785 dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW,
1786 m_hdr->desc_card_addr);
1787 if (rval)
1788 goto error_exit;
1789
1790 rval = ha->isp_ops->wr_reg_indirect(ha,
1791 dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0);
1792 if (rval)
1793 goto error_exit;
1794
1795 rval = ha->isp_ops->wr_reg_indirect(ha,
1796 dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL,
1797 m_hdr->start_dma_cmd);
1798 if (rval)
1799 goto error_exit;
1800
1801 /* Wait for dma operation to complete. */
1802 for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) {
1803 rval = ha->isp_ops->rd_reg_indirect(ha,
1804 (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
1805 &cmd_sts_and_cntrl);
1806 if (rval)
1807 goto error_exit;
1808
1809 if ((cmd_sts_and_cntrl & BIT_1) == 0)
1810 break;
1811 else
1812 udelay(10);
1813 }
1814
1815 /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */
1816 if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) {
1817 rval = QLA_ERROR;
1818 goto error_exit;
1819 }
1820
1821error_exit:
1822 return rval;
1823}
1824
1825static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
1826 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1827 uint32_t **d_ptr)
1828{
1829 int rval = QLA_SUCCESS;
1830 struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
1831 uint32_t size, read_size;
1832 uint8_t *data_ptr = (uint8_t *)*d_ptr;
1833 void *rdmem_buffer = NULL;
1834 dma_addr_t rdmem_dma;
1835 struct qla4_83xx_pex_dma_descriptor dma_desc;
1836
1837 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1838
1839 rval = qla4_83xx_check_dma_engine_state(ha);
1840 if (rval != QLA_SUCCESS) {
1841 DEBUG2(ql4_printk(KERN_INFO, ha,
1842 "%s: DMA engine not available. Fallback to rdmem-read.\n",
1843 __func__));
1844 return QLA_ERROR;
1845 }
1846
1847 m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr;
1848 rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
1849 QLA83XX_PEX_DMA_READ_SIZE,
1850 &rdmem_dma, GFP_KERNEL);
1851 if (!rdmem_buffer) {
1852 DEBUG2(ql4_printk(KERN_INFO, ha,
1853 "%s: Unable to allocate rdmem dma buffer\n",
1854 __func__));
1855 return QLA_ERROR;
1856 }
1857
1858 /* Prepare pex-dma descriptor to be written to MS memory. */
1859 /* dma-desc-cmd layout:
1860 * 0-3: dma-desc-cmd 0-3
1861 * 4-7: pcid function number
1862 * 8-15: dma-desc-cmd 8-15
1863 */
1864 dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
1865 dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
1866 dma_desc.dma_bus_addr = rdmem_dma;
1867
1868 size = 0;
1869 read_size = 0;
1870 /*
1871 * Perform rdmem operation using pex-dma.
1872 * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE.
1873 */
1874 while (read_size < m_hdr->read_data_size) {
1875 if (m_hdr->read_data_size - read_size >=
1876 QLA83XX_PEX_DMA_READ_SIZE)
1877 size = QLA83XX_PEX_DMA_READ_SIZE;
1878 else {
1879 size = (m_hdr->read_data_size - read_size);
1880
1881 if (rdmem_buffer)
1882 dma_free_coherent(&ha->pdev->dev,
1883 QLA83XX_PEX_DMA_READ_SIZE,
1884 rdmem_buffer, rdmem_dma);
1885
1886 rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size,
1887 &rdmem_dma,
1888 GFP_KERNEL);
1889 if (!rdmem_buffer) {
1890 DEBUG2(ql4_printk(KERN_INFO, ha,
1891 "%s: Unable to allocate rdmem dma buffer\n",
1892 __func__));
1893 return QLA_ERROR;
1894 }
1895 dma_desc.dma_bus_addr = rdmem_dma;
1896 }
1897
1898 dma_desc.src_addr = m_hdr->read_addr + read_size;
1899 dma_desc.cmd.read_data_size = size;
1900
1901 /* Prepare: Write pex-dma descriptor to MS memory. */
1902 rval = qla4_83xx_ms_mem_write_128b(ha,
1903 (uint64_t)m_hdr->desc_card_addr,
1904 (uint32_t *)&dma_desc,
1905 (sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
1906 if (rval == -1) {
1907 ql4_printk(KERN_INFO, ha,
1908 "%s: Error writing rdmem-dma-init to MS !!!\n",
1909 __func__);
1910 goto error_exit;
1911 }
1912
1913 DEBUG2(ql4_printk(KERN_INFO, ha,
1914 "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n",
1915 __func__, size));
1916 /* Execute: Start pex-dma operation. */
1917 rval = qla4_83xx_start_pex_dma(ha, m_hdr);
1918 if (rval != QLA_SUCCESS) {
1919 DEBUG2(ql4_printk(KERN_INFO, ha,
1920 "scsi(%ld): start-pex-dma failed rval=0x%x\n",
1921 ha->host_no, rval));
1922 goto error_exit;
1923 }
1924
1925 memcpy(data_ptr, rdmem_buffer, size);
1926 data_ptr += size;
1927 read_size += size;
1928 }
1929
1930 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
1931
1932 *d_ptr = (uint32_t *)data_ptr;
1933
1934error_exit:
1935 if (rdmem_buffer)
1936 dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer,
1937 rdmem_dma);
1938
1939 return rval;
1940}
1941
1740static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha, 1942static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1741 struct qla8xxx_minidump_entry_hdr *entry_hdr, 1943 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1742 uint32_t **d_ptr) 1944 uint32_t **d_ptr)
@@ -2068,7 +2270,7 @@ static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
2068#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 2270#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
2069#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 2271#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
2070 2272
2071static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, 2273static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2072 struct qla8xxx_minidump_entry_hdr *entry_hdr, 2274 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2073 uint32_t **d_ptr) 2275 uint32_t **d_ptr)
2074{ 2276{
@@ -2150,6 +2352,28 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2150 return QLA_SUCCESS; 2352 return QLA_SUCCESS;
2151} 2353}
2152 2354
2355static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2356 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2357 uint32_t **d_ptr)
2358{
2359 uint32_t *data_ptr = *d_ptr;
2360 int rval = QLA_SUCCESS;
2361
2362 if (is_qla8032(ha) || is_qla8042(ha)) {
2363 rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr,
2364 &data_ptr);
2365 if (rval != QLA_SUCCESS) {
2366 rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
2367 &data_ptr);
2368 }
2369 } else {
2370 rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
2371 &data_ptr);
2372 }
2373 *d_ptr = data_ptr;
2374 return rval;
2375}
2376
2153static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha, 2377static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
2154 struct qla8xxx_minidump_entry_hdr *entry_hdr, 2378 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2155 int index) 2379 int index)
@@ -2398,13 +2622,13 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2398 (((uint8_t *)ha->fw_dump_tmplt_hdr) + 2622 (((uint8_t *)ha->fw_dump_tmplt_hdr) +
2399 tmplt_hdr->first_entry_offset); 2623 tmplt_hdr->first_entry_offset);
2400 2624
2401 if (is_qla8032(ha)) 2625 if (is_qla8032(ha) || is_qla8042(ha))
2402 tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] = 2626 tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
2403 tmplt_hdr->ocm_window_reg[ha->func_num]; 2627 tmplt_hdr->ocm_window_reg[ha->func_num];
2404 2628
2405 /* Walk through the entry headers - validate/perform required action */ 2629 /* Walk through the entry headers - validate/perform required action */
2406 for (i = 0; i < num_entry_hdr; i++) { 2630 for (i = 0; i < num_entry_hdr; i++) {
2407 if (data_collected >= ha->fw_dump_size) { 2631 if (data_collected > ha->fw_dump_size) {
2408 ql4_printk(KERN_INFO, ha, 2632 ql4_printk(KERN_INFO, ha,
2409 "Data collected: [0x%x], Total Dump size: [0x%x]\n", 2633 "Data collected: [0x%x], Total Dump size: [0x%x]\n",
2410 data_collected, ha->fw_dump_size); 2634 data_collected, ha->fw_dump_size);
@@ -2455,7 +2679,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2455 if (is_qla8022(ha)) { 2679 if (is_qla8022(ha)) {
2456 qla4_82xx_minidump_process_rdrom(ha, entry_hdr, 2680 qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
2457 &data_ptr); 2681 &data_ptr);
2458 } else if (is_qla8032(ha)) { 2682 } else if (is_qla8032(ha) || is_qla8042(ha)) {
2459 rval = qla4_83xx_minidump_process_rdrom(ha, 2683 rval = qla4_83xx_minidump_process_rdrom(ha,
2460 entry_hdr, 2684 entry_hdr,
2461 &data_ptr); 2685 &data_ptr);
@@ -2496,7 +2720,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2496 &data_ptr); 2720 &data_ptr);
2497 break; 2721 break;
2498 case QLA83XX_POLLRD: 2722 case QLA83XX_POLLRD:
2499 if (!is_qla8032(ha)) { 2723 if (is_qla8022(ha)) {
2500 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2724 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2501 break; 2725 break;
2502 } 2726 }
@@ -2506,7 +2730,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2506 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2730 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2507 break; 2731 break;
2508 case QLA83XX_RDMUX2: 2732 case QLA83XX_RDMUX2:
2509 if (!is_qla8032(ha)) { 2733 if (is_qla8022(ha)) {
2510 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2734 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2511 break; 2735 break;
2512 } 2736 }
@@ -2514,7 +2738,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2514 &data_ptr); 2738 &data_ptr);
2515 break; 2739 break;
2516 case QLA83XX_POLLRDMWR: 2740 case QLA83XX_POLLRDMWR:
2517 if (!is_qla8032(ha)) { 2741 if (is_qla8022(ha)) {
2518 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2742 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2519 break; 2743 break;
2520 } 2744 }
@@ -2529,9 +2753,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2529 break; 2753 break;
2530 } 2754 }
2531 2755
2532 data_collected = (uint8_t *)data_ptr - 2756 data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump;
2533 ((uint8_t *)((uint8_t *)ha->fw_dump +
2534 ha->fw_dump_tmplt_size));
2535skip_nxt_entry: 2757skip_nxt_entry:
2536 /* next entry in the template */ 2758 /* next entry in the template */
2537 entry_hdr = (struct qla8xxx_minidump_entry_hdr *) 2759 entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
@@ -2539,10 +2761,11 @@ skip_nxt_entry:
2539 entry_hdr->entry_size); 2761 entry_hdr->entry_size);
2540 } 2762 }
2541 2763
2542 if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) { 2764 if (data_collected != ha->fw_dump_size) {
2543 ql4_printk(KERN_INFO, ha, 2765 ql4_printk(KERN_INFO, ha,
2544 "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n", 2766 "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
2545 data_collected, ha->fw_dump_size); 2767 data_collected, ha->fw_dump_size);
2768 rval = QLA_ERROR;
2546 goto md_failed; 2769 goto md_failed;
2547 } 2770 }
2548 2771
@@ -2642,10 +2865,10 @@ dev_initialize:
2642 QLA8XXX_DEV_INITIALIZING); 2865 QLA8XXX_DEV_INITIALIZING);
2643 2866
2644 /* 2867 /*
2645 * For ISP8324, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after 2868 * For ISP8324 and ISP8042, if IDC_CTRL GRACEFUL_RESET_BIT1 is set,
2646 * device goes to INIT state. 2869 * reset it after device goes to INIT state.
2647 */ 2870 */
2648 if (is_qla8032(ha)) { 2871 if (is_qla8032(ha) || is_qla8042(ha)) {
2649 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); 2872 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
2650 if (idc_ctrl & GRACEFUL_RESET_BIT1) { 2873 if (idc_ctrl & GRACEFUL_RESET_BIT1) {
2651 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, 2874 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
@@ -2846,7 +3069,7 @@ int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
2846 * If we are the first driver to load and 3069 * If we are the first driver to load and
2847 * ql4xdontresethba is not set, clear IDC_CTRL BIT0. 3070 * ql4xdontresethba is not set, clear IDC_CTRL BIT0.
2848 */ 3071 */
2849 if (is_qla8032(ha)) { 3072 if (is_qla8032(ha) || is_qla8042(ha)) {
2850 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); 3073 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
2851 if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba) 3074 if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
2852 qla4_83xx_clear_idc_dontreset(ha); 3075 qla4_83xx_clear_idc_dontreset(ha);
@@ -2854,7 +3077,7 @@ int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
2854 3077
2855 if (is_qla8022(ha)) { 3078 if (is_qla8022(ha)) {
2856 qla4_82xx_set_idc_ver(ha); 3079 qla4_82xx_set_idc_ver(ha);
2857 } else if (is_qla8032(ha)) { 3080 } else if (is_qla8032(ha) || is_qla8042(ha)) {
2858 rval = qla4_83xx_set_idc_ver(ha); 3081 rval = qla4_83xx_set_idc_ver(ha);
2859 if (rval == QLA_ERROR) 3082 if (rval == QLA_ERROR)
2860 qla4_8xxx_clear_drv_active(ha); 3083 qla4_8xxx_clear_drv_active(ha);
@@ -2922,11 +3145,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
2922 break; 3145 break;
2923 case QLA8XXX_DEV_NEED_RESET: 3146 case QLA8XXX_DEV_NEED_RESET:
2924 /* 3147 /*
2925 * For ISP8324, if NEED_RESET is set by any driver, 3148 * For ISP8324 and ISP8042, if NEED_RESET is set by any
2926 * it should be honored, irrespective of IDC_CTRL 3149 * driver, it should be honored, irrespective of
2927 * DONTRESET_BIT0 3150 * IDC_CTRL DONTRESET_BIT0
2928 */ 3151 */
2929 if (is_qla8032(ha)) { 3152 if (is_qla8032(ha) || is_qla8042(ha)) {
2930 qla4_83xx_need_reset_handler(ha); 3153 qla4_83xx_need_reset_handler(ha);
2931 } else if (is_qla8022(ha)) { 3154 } else if (is_qla8022(ha)) {
2932 if (!ql4xdontresethba) { 3155 if (!ql4xdontresethba) {
@@ -2976,7 +3199,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
2976 int retval; 3199 int retval;
2977 3200
2978 /* clear the interrupt */ 3201 /* clear the interrupt */
2979 if (is_qla8032(ha)) { 3202 if (is_qla8032(ha) || is_qla8042(ha)) {
2980 writel(0, &ha->qla4_83xx_reg->risc_intr); 3203 writel(0, &ha->qla4_83xx_reg->risc_intr);
2981 readl(&ha->qla4_83xx_reg->risc_intr); 3204 readl(&ha->qla4_83xx_reg->risc_intr);
2982 } else if (is_qla8022(ha)) { 3205 } else if (is_qla8022(ha)) {
@@ -3094,7 +3317,7 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
3094 if (is_qla8022(ha)) { 3317 if (is_qla8022(ha)) {
3095 qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, 3318 qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
3096 flt_addr << 2, OPTROM_BURST_SIZE); 3319 flt_addr << 2, OPTROM_BURST_SIZE);
3097 } else if (is_qla8032(ha)) { 3320 } else if (is_qla8032(ha) || is_qla8042(ha)) {
3098 status = qla4_83xx_flash_read_u32(ha, flt_addr << 2, 3321 status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
3099 (uint8_t *)ha->request_ring, 3322 (uint8_t *)ha->request_ring,
3100 0x400); 3323 0x400);
@@ -3326,7 +3549,7 @@ qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
3326 if (is_qla8022(ha)) { 3549 if (is_qla8022(ha)) {
3327 qla4_82xx_get_fdt_info(ha); 3550 qla4_82xx_get_fdt_info(ha);
3328 qla4_82xx_get_idc_param(ha); 3551 qla4_82xx_get_idc_param(ha);
3329 } else if (is_qla8032(ha)) { 3552 } else if (is_qla8032(ha) || is_qla8042(ha)) {
3330 qla4_83xx_get_idc_param(ha); 3553 qla4_83xx_get_idc_param(ha);
3331 } 3554 }
3332 3555
@@ -3436,7 +3659,7 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
3436 } 3659 }
3437 3660
3438 /* Make sure we receive the minimum required data to cache internally */ 3661 /* Make sure we receive the minimum required data to cache internally */
3439 if ((is_qla8032(ha) ? mbox_sts[3] : mbox_sts[4]) < 3662 if (((is_qla8032(ha) || is_qla8042(ha)) ? mbox_sts[3] : mbox_sts[4]) <
3440 offsetof(struct mbx_sys_info, reserved)) { 3663 offsetof(struct mbx_sys_info, reserved)) {
3441 DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive" 3664 DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
3442 " error (%x)\n", ha->host_no, __func__, mbox_sts[4])); 3665 " error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 9dc0bbfe50d5..14500a0f62cc 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index b246b3c26912..f8a0a26a3cd4 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -378,6 +378,44 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
378 case ISCSI_PARAM_PASSWORD: 378 case ISCSI_PARAM_PASSWORD:
379 case ISCSI_PARAM_USERNAME_IN: 379 case ISCSI_PARAM_USERNAME_IN:
380 case ISCSI_PARAM_PASSWORD_IN: 380 case ISCSI_PARAM_PASSWORD_IN:
381 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
382 case ISCSI_PARAM_DISCOVERY_SESS:
383 case ISCSI_PARAM_PORTAL_TYPE:
384 case ISCSI_PARAM_CHAP_AUTH_EN:
385 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
386 case ISCSI_PARAM_BIDI_CHAP_EN:
387 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
388 case ISCSI_PARAM_DEF_TIME2WAIT:
389 case ISCSI_PARAM_DEF_TIME2RETAIN:
390 case ISCSI_PARAM_HDRDGST_EN:
391 case ISCSI_PARAM_DATADGST_EN:
392 case ISCSI_PARAM_INITIAL_R2T_EN:
393 case ISCSI_PARAM_IMM_DATA_EN:
394 case ISCSI_PARAM_PDU_INORDER_EN:
395 case ISCSI_PARAM_DATASEQ_INORDER_EN:
396 case ISCSI_PARAM_MAX_SEGMENT_SIZE:
397 case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
398 case ISCSI_PARAM_TCP_WSF_DISABLE:
399 case ISCSI_PARAM_TCP_NAGLE_DISABLE:
400 case ISCSI_PARAM_TCP_TIMER_SCALE:
401 case ISCSI_PARAM_TCP_TIMESTAMP_EN:
402 case ISCSI_PARAM_TCP_XMIT_WSF:
403 case ISCSI_PARAM_TCP_RECV_WSF:
404 case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
405 case ISCSI_PARAM_IPV4_TOS:
406 case ISCSI_PARAM_IPV6_TC:
407 case ISCSI_PARAM_IPV6_FLOW_LABEL:
408 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
409 case ISCSI_PARAM_KEEPALIVE_TMO:
410 case ISCSI_PARAM_LOCAL_PORT:
411 case ISCSI_PARAM_ISID:
412 case ISCSI_PARAM_TSID:
413 case ISCSI_PARAM_DEF_TASKMGMT_TMO:
414 case ISCSI_PARAM_ERL:
415 case ISCSI_PARAM_STATSN:
416 case ISCSI_PARAM_EXP_STATSN:
417 case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
418 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
381 return S_IRUGO; 419 return S_IRUGO;
382 default: 420 default:
383 return 0; 421 return 0;
@@ -2218,19 +2256,23 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
2218 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 2256 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
2219 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); 2257 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
2220 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); 2258 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
2221 fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
2222 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 2259 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
2223 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 2260 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
2224 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 2261 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
2225 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); 2262 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
2226 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); 2263 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
2227 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type); 2264 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
2228 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 2265 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
2229 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 2266 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
2230 fw_ddb_entry->port = cpu_to_le16(conn->port); 2267 fw_ddb_entry->port = cpu_to_le16(conn->port);
2231 fw_ddb_entry->def_timeout = 2268 fw_ddb_entry->def_timeout =
2232 cpu_to_le16(sess->default_taskmgmt_timeout); 2269 cpu_to_le16(sess->default_taskmgmt_timeout);
2233 2270
2271 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2272 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
2273 else
2274 fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
2275
2234 if (conn->ipaddress) 2276 if (conn->ipaddress)
2235 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, 2277 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
2236 sizeof(fw_ddb_entry->ip_addr)); 2278 sizeof(fw_ddb_entry->ip_addr));
@@ -2257,6 +2299,101 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
2257 return rc; 2299 return rc;
2258} 2300}
2259 2301
2302static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
2303 struct iscsi_session *sess,
2304 struct dev_db_entry *fw_ddb_entry)
2305{
2306 unsigned long options = 0;
2307 uint16_t ddb_link;
2308 uint16_t disc_parent;
2309
2310 options = le16_to_cpu(fw_ddb_entry->options);
2311 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2312 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2313 &options);
2314 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2315
2316 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2317 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2318 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2319 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2320 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2321 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2322 &options);
2323 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2324 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2325 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2326 &options);
2327 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2328 sess->discovery_auth_optional =
2329 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2330 if (test_bit(ISCSIOPT_ERL1, &options))
2331 sess->erl |= BIT_1;
2332 if (test_bit(ISCSIOPT_ERL0, &options))
2333 sess->erl |= BIT_0;
2334
2335 options = le16_to_cpu(fw_ddb_entry->tcp_options);
2336 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2337 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2338 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2339 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2340 conn->tcp_timer_scale |= BIT_3;
2341 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2342 conn->tcp_timer_scale |= BIT_2;
2343 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2344 conn->tcp_timer_scale |= BIT_1;
2345
2346 conn->tcp_timer_scale >>= 1;
2347 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2348
2349 options = le16_to_cpu(fw_ddb_entry->ip_options);
2350 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2351
2352 conn->max_recv_dlength = BYTE_UNITS *
2353 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2354 conn->max_xmit_dlength = BYTE_UNITS *
2355 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2356 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2357 sess->first_burst = BYTE_UNITS *
2358 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2359 sess->max_burst = BYTE_UNITS *
2360 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2361 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2362 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2363 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2364 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2365 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2366 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2367 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2368 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
2369 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2370 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2371 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2372 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2373 COPY_ISID(sess->isid, fw_ddb_entry->isid);
2374
2375 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
2376 if (ddb_link < MAX_DDB_ENTRIES)
2377 sess->discovery_parent_idx = ddb_link;
2378 else
2379 sess->discovery_parent_idx = DDB_NO_LINK;
2380
2381 if (ddb_link == DDB_ISNS)
2382 disc_parent = ISCSI_DISC_PARENT_ISNS;
2383 else if (ddb_link == DDB_NO_LINK)
2384 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
2385 else if (ddb_link < MAX_DDB_ENTRIES)
2386 disc_parent = ISCSI_DISC_PARENT_SENDTGT;
2387 else
2388 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
2389
2390 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
2391 iscsi_get_discovery_parent_name(disc_parent), 0);
2392
2393 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2394 (char *)fw_ddb_entry->iscsi_alias, 0);
2395}
2396
2260static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, 2397static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
2261 struct dev_db_entry *fw_ddb_entry, 2398 struct dev_db_entry *fw_ddb_entry,
2262 struct iscsi_cls_session *cls_sess, 2399 struct iscsi_cls_session *cls_sess,
@@ -2275,47 +2412,29 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
2275 2412
2276 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 2413 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2277 2414
2278 conn->max_recv_dlength = BYTE_UNITS * 2415 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
2279 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2280
2281 conn->max_xmit_dlength = BYTE_UNITS *
2282 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2283
2284 sess->initial_r2t_en =
2285 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2286
2287 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2288
2289 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2290
2291 sess->first_burst = BYTE_UNITS *
2292 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2293
2294 sess->max_burst = BYTE_UNITS *
2295 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2296
2297 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2298
2299 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2300 2416
2417 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
2301 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); 2418 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
2302 2419
2303 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); 2420 memset(ip_addr, 0, sizeof(ip_addr));
2304
2305 options = le16_to_cpu(fw_ddb_entry->options); 2421 options = le16_to_cpu(fw_ddb_entry->options);
2306 if (options & DDB_OPT_IPV6_DEVICE) 2422 if (options & DDB_OPT_IPV6_DEVICE) {
2423 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
2424
2425 memset(ip_addr, 0, sizeof(ip_addr));
2307 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); 2426 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
2308 else 2427 } else {
2428 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
2309 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); 2429 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
2430 }
2310 2431
2432 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
2433 (char *)ip_addr, buflen);
2311 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, 2434 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
2312 (char *)fw_ddb_entry->iscsi_name, buflen); 2435 (char *)fw_ddb_entry->iscsi_name, buflen);
2313 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, 2436 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
2314 (char *)ha->name_string, buflen); 2437 (char *)ha->name_string, buflen);
2315 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
2316 (char *)ip_addr, buflen);
2317 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2318 (char *)fw_ddb_entry->iscsi_alias, buflen);
2319} 2438}
2320 2439
2321void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, 2440void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
@@ -2403,37 +2522,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2403 2522
2404 /* Update params */ 2523 /* Update params */
2405 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); 2524 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2406 conn->max_recv_dlength = BYTE_UNITS * 2525 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
2407 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2408
2409 conn->max_xmit_dlength = BYTE_UNITS *
2410 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2411
2412 sess->initial_r2t_en =
2413 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2414
2415 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2416
2417 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2418
2419 sess->first_burst = BYTE_UNITS *
2420 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2421
2422 sess->max_burst = BYTE_UNITS *
2423 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2424
2425 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2426
2427 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2428
2429 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2430 2526
2431 memcpy(sess->initiatorname, ha->name_string, 2527 memcpy(sess->initiatorname, ha->name_string,
2432 min(sizeof(ha->name_string), sizeof(sess->initiatorname))); 2528 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
2433 2529
2434 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2435 (char *)fw_ddb_entry->iscsi_alias, 0);
2436
2437exit_session_conn_param: 2530exit_session_conn_param:
2438 if (fw_ddb_entry) 2531 if (fw_ddb_entry)
2439 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 2532 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
@@ -2578,6 +2671,8 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2578 !test_bit(AF_ONLINE, &ha->flags) || 2671 !test_bit(AF_ONLINE, &ha->flags) ||
2579 !test_bit(AF_LINK_UP, &ha->flags) || 2672 !test_bit(AF_LINK_UP, &ha->flags) ||
2580 test_bit(AF_LOOPBACK, &ha->flags) || 2673 test_bit(AF_LOOPBACK, &ha->flags) ||
2674 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
2675 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
2581 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) 2676 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2582 goto qc_host_busy; 2677 goto qc_host_busy;
2583 2678
@@ -2652,7 +2747,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2652 if (ha->nx_pcibase) 2747 if (ha->nx_pcibase)
2653 iounmap( 2748 iounmap(
2654 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 2749 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2655 } else if (is_qla8032(ha)) { 2750 } else if (is_qla8032(ha) || is_qla8042(ha)) {
2656 if (ha->nx_pcibase) 2751 if (ha->nx_pcibase)
2657 iounmap( 2752 iounmap(
2658 (struct device_reg_83xx __iomem *)ha->nx_pcibase); 2753 (struct device_reg_83xx __iomem *)ha->nx_pcibase);
@@ -2846,7 +2941,7 @@ static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
2846 __func__); 2941 __func__);
2847 if (halt_status & HALT_STATUS_UNRECOVERABLE) 2942 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2848 halt_status_unrecoverable = 1; 2943 halt_status_unrecoverable = 1;
2849 } else if (is_qla8032(ha)) { 2944 } else if (is_qla8032(ha) || is_qla8042(ha)) {
2850 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) 2945 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
2851 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", 2946 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
2852 __func__); 2947 __func__);
@@ -2901,7 +2996,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2901 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", 2996 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
2902 __func__); 2997 __func__);
2903 2998
2904 if (is_qla8032(ha)) { 2999 if (is_qla8032(ha) || is_qla8042(ha)) {
2905 idc_ctrl = qla4_83xx_rd_reg(ha, 3000 idc_ctrl = qla4_83xx_rd_reg(ha,
2906 QLA83XX_IDC_DRV_CTRL); 3001 QLA83XX_IDC_DRV_CTRL);
2907 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { 3002 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
@@ -2912,7 +3007,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2912 } 3007 }
2913 } 3008 }
2914 3009
2915 if (is_qla8032(ha) || 3010 if ((is_qla8032(ha) || is_qla8042(ha)) ||
2916 (is_qla8022(ha) && !ql4xdontresethba)) { 3011 (is_qla8022(ha) && !ql4xdontresethba)) {
2917 set_bit(DPC_RESET_HA, &ha->dpc_flags); 3012 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2918 qla4xxx_wake_dpc(ha); 3013 qla4xxx_wake_dpc(ha);
@@ -3296,7 +3391,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
3296 3391
3297 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 3392 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3298 3393
3299 if (is_qla8032(ha) && 3394 if ((is_qla8032(ha) || is_qla8042(ha)) &&
3300 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 3395 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3301 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 3396 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3302 __func__); 3397 __func__);
@@ -3494,7 +3589,9 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
3494 } else { 3589 } else {
3495 /* Trigger relogin */ 3590 /* Trigger relogin */
3496 if (ddb_entry->ddb_type == FLASH_DDB) { 3591 if (ddb_entry->ddb_type == FLASH_DDB) {
3497 if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) 3592 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
3593 test_bit(DF_DISABLE_RELOGIN,
3594 &ddb_entry->flags)))
3498 qla4xxx_arm_relogin_timer(ddb_entry); 3595 qla4xxx_arm_relogin_timer(ddb_entry);
3499 } else 3596 } else
3500 iscsi_session_failure(cls_session->dd_data, 3597 iscsi_session_failure(cls_session->dd_data,
@@ -3597,6 +3694,9 @@ static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3597 if (!(ddb_entry->ddb_type == FLASH_DDB)) 3694 if (!(ddb_entry->ddb_type == FLASH_DDB))
3598 return; 3695 return;
3599 3696
3697 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
3698 return;
3699
3600 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && 3700 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3601 !iscsi_is_session_online(cls_sess)) { 3701 !iscsi_is_session_online(cls_sess)) {
3602 DEBUG2(ql4_printk(KERN_INFO, ha, 3702 DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -3750,7 +3850,7 @@ static void qla4xxx_do_dpc(struct work_struct *work)
3750 3850
3751 if (is_qla80XX(ha)) { 3851 if (is_qla80XX(ha)) {
3752 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 3852 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3753 if (is_qla8032(ha)) { 3853 if (is_qla8032(ha) || is_qla8042(ha)) {
3754 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", 3854 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3755 __func__); 3855 __func__);
3756 /* disable pause frame for ISP83xx */ 3856 /* disable pause frame for ISP83xx */
@@ -3765,8 +3865,35 @@ static void qla4xxx_do_dpc(struct work_struct *work)
3765 qla4_8xxx_device_state_handler(ha); 3865 qla4_8xxx_device_state_handler(ha);
3766 } 3866 }
3767 3867
3768 if (test_and_clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) 3868 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
3869 if (is_qla8042(ha)) {
3870 if (ha->idc_info.info2 &
3871 ENABLE_INTERNAL_LOOPBACK) {
3872 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
3873 __func__);
3874 status = qla4_84xx_config_acb(ha,
3875 ACB_CONFIG_DISABLE);
3876 if (status != QLA_SUCCESS) {
3877 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
3878 __func__);
3879 }
3880 }
3881 }
3769 qla4_83xx_post_idc_ack(ha); 3882 qla4_83xx_post_idc_ack(ha);
3883 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
3884 }
3885
3886 if (is_qla8042(ha) &&
3887 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
3888 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
3889 __func__);
3890 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
3891 QLA_SUCCESS) {
3892 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
3893 __func__);
3894 }
3895 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
3896 }
3770 3897
3771 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 3898 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3772 qla4_8xxx_need_qsnt_handler(ha); 3899 qla4_8xxx_need_qsnt_handler(ha);
@@ -3778,7 +3905,8 @@ static void qla4xxx_do_dpc(struct work_struct *work)
3778 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 3905 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3779 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 3906 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3780 if ((is_qla8022(ha) && ql4xdontresethba) || 3907 if ((is_qla8022(ha) && ql4xdontresethba) ||
3781 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { 3908 ((is_qla8032(ha) || is_qla8042(ha)) &&
3909 qla4_83xx_idc_dontreset(ha))) {
3782 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 3910 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3783 ha->host_no, __func__)); 3911 ha->host_no, __func__));
3784 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 3912 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -3870,7 +3998,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3870 } else if (is_qla8022(ha)) { 3998 } else if (is_qla8022(ha)) {
3871 writel(0, &ha->qla4_82xx_reg->host_int); 3999 writel(0, &ha->qla4_82xx_reg->host_int);
3872 readl(&ha->qla4_82xx_reg->host_int); 4000 readl(&ha->qla4_82xx_reg->host_int);
3873 } else if (is_qla8032(ha)) { 4001 } else if (is_qla8032(ha) || is_qla8042(ha)) {
3874 writel(0, &ha->qla4_83xx_reg->risc_intr); 4002 writel(0, &ha->qla4_83xx_reg->risc_intr);
3875 readl(&ha->qla4_83xx_reg->risc_intr); 4003 readl(&ha->qla4_83xx_reg->risc_intr);
3876 } 4004 }
@@ -3945,7 +4073,7 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3945 (ha->pdev->devfn << 11)); 4073 (ha->pdev->devfn << 11));
3946 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 : 4074 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3947 QLA82XX_CAM_RAM_DB2); 4075 QLA82XX_CAM_RAM_DB2);
3948 } else if (is_qla8032(ha)) { 4076 } else if (is_qla8032(ha) || is_qla8042(ha)) {
3949 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) 4077 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
3950 ((uint8_t *)ha->nx_pcibase); 4078 ((uint8_t *)ha->nx_pcibase);
3951 } 4079 }
@@ -5609,7 +5737,8 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
5609 goto exit_ddb_add; 5737 goto exit_ddb_add;
5610 } 5738 }
5611 5739
5612 for (idx = 0; idx < max_ddbs; idx++) { 5740 /* Index 0 and 1 are reserved for boot target entries */
5741 for (idx = 2; idx < max_ddbs; idx++) {
5613 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, 5742 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
5614 fw_ddb_entry_dma, idx)) 5743 fw_ddb_entry_dma, idx))
5615 break; 5744 break;
@@ -5925,13 +6054,6 @@ static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
5925 goto exit_ddb_logout; 6054 goto exit_ddb_logout;
5926 } 6055 }
5927 6056
5928 options = LOGOUT_OPTION_CLOSE_SESSION;
5929 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
5930 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
5931 ret = -EIO;
5932 goto exit_ddb_logout;
5933 }
5934
5935 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 6057 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5936 &fw_ddb_entry_dma, GFP_KERNEL); 6058 &fw_ddb_entry_dma, GFP_KERNEL);
5937 if (!fw_ddb_entry) { 6059 if (!fw_ddb_entry) {
@@ -5941,6 +6063,38 @@ static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
5941 goto exit_ddb_logout; 6063 goto exit_ddb_logout;
5942 } 6064 }
5943 6065
6066 if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
6067 goto ddb_logout_init;
6068
6069 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
6070 fw_ddb_entry, fw_ddb_entry_dma,
6071 NULL, NULL, &ddb_state, NULL,
6072 NULL, NULL);
6073 if (ret == QLA_ERROR)
6074 goto ddb_logout_init;
6075
6076 if (ddb_state == DDB_DS_SESSION_ACTIVE)
6077 goto ddb_logout_init;
6078
6079 /* wait until next relogin is triggered using DF_RELOGIN and
6080 * clear DF_RELOGIN to avoid invocation of further relogin
6081 */
6082 wtime = jiffies + (HZ * RELOGIN_TOV);
6083 do {
6084 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
6085 goto ddb_logout_init;
6086
6087 schedule_timeout_uninterruptible(HZ);
6088 } while ((time_after(wtime, jiffies)));
6089
6090ddb_logout_init:
6091 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
6092 atomic_set(&ddb_entry->relogin_timer, 0);
6093
6094 options = LOGOUT_OPTION_CLOSE_SESSION;
6095 qla4xxx_session_logout_ddb(ha, ddb_entry, options);
6096
6097 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
5944 wtime = jiffies + (HZ * LOGOUT_TOV); 6098 wtime = jiffies + (HZ * LOGOUT_TOV);
5945 do { 6099 do {
5946 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, 6100 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
@@ -5970,10 +6124,12 @@ ddb_logout_clr_sess:
5970 6124
5971 spin_lock_irqsave(&ha->hardware_lock, flags); 6125 spin_lock_irqsave(&ha->hardware_lock, flags);
5972 qla4xxx_free_ddb(ha, ddb_entry); 6126 qla4xxx_free_ddb(ha, ddb_entry);
6127 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
5973 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6128 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5974 6129
5975 iscsi_session_teardown(ddb_entry->sess); 6130 iscsi_session_teardown(ddb_entry->sess);
5976 6131
6132 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
5977 ret = QLA_SUCCESS; 6133 ret = QLA_SUCCESS;
5978 6134
5979exit_ddb_logout: 6135exit_ddb_logout:
@@ -6110,7 +6266,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6110 struct iscsi_bus_flash_conn *fnode_conn; 6266 struct iscsi_bus_flash_conn *fnode_conn;
6111 struct ql4_chap_table chap_tbl; 6267 struct ql4_chap_table chap_tbl;
6112 struct device *dev; 6268 struct device *dev;
6113 int parent_type, parent_index = 0xffff; 6269 int parent_type;
6114 int rc = 0; 6270 int rc = 0;
6115 6271
6116 dev = iscsi_find_flashnode_conn(fnode_sess); 6272 dev = iscsi_find_flashnode_conn(fnode_sess);
@@ -6276,10 +6432,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6276 rc = sprintf(buf, "\n"); 6432 rc = sprintf(buf, "\n");
6277 break; 6433 break;
6278 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 6434 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6279 if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES) 6435 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
6280 parent_index = fnode_sess->discovery_parent_idx;
6281
6282 rc = sprintf(buf, "%u\n", parent_index);
6283 break; 6436 break;
6284 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 6437 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
6285 if (fnode_sess->discovery_parent_type == DDB_ISNS) 6438 if (fnode_sess->discovery_parent_type == DDB_ISNS)
@@ -6533,8 +6686,8 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
6533 memcpy(fnode_conn->link_local_ipv6_addr, 6686 memcpy(fnode_conn->link_local_ipv6_addr,
6534 fnode_param->value, IPv6_ADDR_LEN); 6687 fnode_param->value, IPv6_ADDR_LEN);
6535 break; 6688 break;
6536 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: 6689 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6537 fnode_sess->discovery_parent_type = 6690 fnode_sess->discovery_parent_idx =
6538 *(uint16_t *)fnode_param->value; 6691 *(uint16_t *)fnode_param->value;
6539 break; 6692 break;
6540 case ISCSI_FLASHNODE_TCP_XMIT_WSF: 6693 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
@@ -6910,7 +7063,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
6910 nx_legacy_intr->tgt_status_reg; 7063 nx_legacy_intr->tgt_status_reg;
6911 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 7064 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
6912 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 7065 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
6913 } else if (is_qla8032(ha)) { 7066 } else if (is_qla8032(ha) || is_qla8042(ha)) {
6914 ha->isp_ops = &qla4_83xx_isp_ops; 7067 ha->isp_ops = &qla4_83xx_isp_ops;
6915 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; 7068 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
6916 } else { 7069 } else {
@@ -6981,7 +7134,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
6981 if (is_qla80XX(ha)) 7134 if (is_qla80XX(ha))
6982 qla4_8xxx_get_flash_info(ha); 7135 qla4_8xxx_get_flash_info(ha);
6983 7136
6984 if (is_qla8032(ha)) { 7137 if (is_qla8032(ha) || is_qla8042(ha)) {
6985 qla4_83xx_read_reset_template(ha); 7138 qla4_83xx_read_reset_template(ha);
6986 /* 7139 /*
6987 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0. 7140 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
@@ -7036,7 +7189,8 @@ skip_retry_init:
7036 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); 7189 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
7037 7190
7038 if ((is_qla8022(ha) && ql4xdontresethba) || 7191 if ((is_qla8022(ha) && ql4xdontresethba) ||
7039 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { 7192 ((is_qla8032(ha) || is_qla8042(ha)) &&
7193 qla4_83xx_idc_dontreset(ha))) {
7040 /* Put the device in failed state. */ 7194 /* Put the device in failed state. */
7041 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); 7195 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
7042 ha->isp_ops->idc_lock(ha); 7196 ha->isp_ops->idc_lock(ha);
@@ -7097,8 +7251,8 @@ skip_retry_init:
7097 " QLogic iSCSI HBA Driver version: %s\n" 7251 " QLogic iSCSI HBA Driver version: %s\n"
7098 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 7252 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
7099 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 7253 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
7100 ha->host_no, ha->firmware_version[0], ha->firmware_version[1], 7254 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
7101 ha->patch_number, ha->build_number); 7255 ha->fw_info.fw_patch, ha->fw_info.fw_build);
7102 7256
7103 /* Set the driver version */ 7257 /* Set the driver version */
7104 if (is_qla80XX(ha)) 7258 if (is_qla80XX(ha))
@@ -7645,16 +7799,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
7645 7799
7646 ha = to_qla_host(cmd->device->host); 7800 ha = to_qla_host(cmd->device->host);
7647 7801
7648 if (is_qla8032(ha) && ql4xdontresethba) 7802 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
7649 qla4_83xx_set_idc_dontreset(ha); 7803 qla4_83xx_set_idc_dontreset(ha);
7650 7804
7651 /* 7805 /*
7652 * For ISP8324, if IDC_CTRL DONTRESET_BIT0 is set by other 7806 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
7653 * protocol drivers, we should not set device_state to 7807 * protocol drivers, we should not set device_state to NEED_RESET
7654 * NEED_RESET
7655 */ 7808 */
7656 if (ql4xdontresethba || 7809 if (ql4xdontresethba ||
7657 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { 7810 ((is_qla8032(ha) || is_qla8042(ha)) &&
7811 qla4_83xx_idc_dontreset(ha))) {
7658 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 7812 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
7659 ha->host_no, __func__)); 7813 ha->host_no, __func__));
7660 7814
@@ -7779,9 +7933,10 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
7779 } 7933 }
7780 7934
7781recover_adapter: 7935recover_adapter:
7782 /* For ISP83XX set graceful reset bit in IDC_DRV_CTRL if 7936 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
7783 * reset is issued by application */ 7937 * reset is issued by application */
7784 if (is_qla8032(ha) && test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 7938 if ((is_qla8032(ha) || is_qla8042(ha)) &&
7939 test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
7785 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); 7940 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
7786 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, 7941 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
7787 (idc_ctrl | GRACEFUL_RESET_BIT1)); 7942 (idc_ctrl | GRACEFUL_RESET_BIT1));
@@ -8078,6 +8233,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
8078 .subvendor = PCI_ANY_ID, 8233 .subvendor = PCI_ANY_ID,
8079 .subdevice = PCI_ANY_ID, 8234 .subdevice = PCI_ANY_ID,
8080 }, 8235 },
8236 {
8237 .vendor = PCI_VENDOR_ID_QLOGIC,
8238 .device = PCI_DEVICE_ID_QLOGIC_ISP8042,
8239 .subvendor = PCI_ANY_ID,
8240 .subdevice = PCI_ANY_ID,
8241 },
8081 {0, 0}, 8242 {0, 0},
8082}; 8243};
8083MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 8244MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index fe873cf7570d..f4fef72c9bcd 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation 3 * Copyright (c) 2003-2013 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.03.00-k9" 8#define QLA4XXX_DRIVER_VERSION "5.04.00-k1"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index cb4fefa1bfba..01c0ffa31276 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1997,8 +1997,14 @@ static unsigned long lba_to_map_index(sector_t lba)
1997 1997
1998static sector_t map_index_to_lba(unsigned long index) 1998static sector_t map_index_to_lba(unsigned long index)
1999{ 1999{
2000 return index * scsi_debug_unmap_granularity - 2000 sector_t lba = index * scsi_debug_unmap_granularity;
2001 scsi_debug_unmap_alignment; 2001
2002 if (scsi_debug_unmap_alignment) {
2003 lba -= scsi_debug_unmap_granularity -
2004 scsi_debug_unmap_alignment;
2005 }
2006
2007 return lba;
2002} 2008}
2003 2009
2004static unsigned int map_state(sector_t lba, unsigned int *num) 2010static unsigned int map_state(sector_t lba, unsigned int *num)
@@ -2659,8 +2665,8 @@ static void __init sdebug_build_parts(unsigned char *ramp,
2659 / sdebug_sectors_per; 2665 / sdebug_sectors_per;
2660 pp->end_sector = (end_sec % sdebug_sectors_per) + 1; 2666 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2661 2667
2662 pp->start_sect = start_sec; 2668 pp->start_sect = cpu_to_le32(start_sec);
2663 pp->nr_sects = end_sec - start_sec + 1; 2669 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
2664 pp->sys_ind = 0x83; /* plain Linux partition */ 2670 pp->sys_ind = 0x83; /* plain Linux partition */
2665 } 2671 }
2666} 2672}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 21505962f539..83e591b60193 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -223,12 +223,80 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
223} 223}
224#endif 224#endif
225 225
226 /**
227 * scsi_report_lun_change - Set flag on all *other* devices on the same target
228 * to indicate that a UNIT ATTENTION is expected.
229 * @sdev: Device reporting the UNIT ATTENTION
230 */
231static void scsi_report_lun_change(struct scsi_device *sdev)
232{
233 sdev->sdev_target->expecting_lun_change = 1;
234}
235
236/**
237 * scsi_report_sense - Examine scsi sense information and log messages for
238 * certain conditions, also issue uevents for some of them.
239 * @sdev: Device reporting the sense code
240 * @sshdr: sshdr to be examined
241 */
242static void scsi_report_sense(struct scsi_device *sdev,
243 struct scsi_sense_hdr *sshdr)
244{
245 enum scsi_device_event evt_type = SDEV_EVT_MAXBITS; /* i.e. none */
246
247 if (sshdr->sense_key == UNIT_ATTENTION) {
248 if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
249 evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
250 sdev_printk(KERN_WARNING, sdev,
251 "Inquiry data has changed");
252 } else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
253 evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
254 scsi_report_lun_change(sdev);
255 sdev_printk(KERN_WARNING, sdev,
256 "Warning! Received an indication that the "
257 "LUN assignments on this target have "
258 "changed. The Linux SCSI layer does not "
259 "automatically remap LUN assignments.\n");
260 } else if (sshdr->asc == 0x3f)
261 sdev_printk(KERN_WARNING, sdev,
262 "Warning! Received an indication that the "
263 "operating parameters on this target have "
264 "changed. The Linux SCSI layer does not "
265 "automatically adjust these parameters.\n");
266
267 if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
268 evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
269 sdev_printk(KERN_WARNING, sdev,
270 "Warning! Received an indication that the "
271 "LUN reached a thin provisioning soft "
272 "threshold.\n");
273 }
274
275 if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
276 evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
277 sdev_printk(KERN_WARNING, sdev,
278 "Mode parameters changed");
279 } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
280 evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
281 sdev_printk(KERN_WARNING, sdev,
282 "Capacity data has changed");
283 } else if (sshdr->asc == 0x2a)
284 sdev_printk(KERN_WARNING, sdev,
285 "Parameters changed");
286 }
287
288 if (evt_type != SDEV_EVT_MAXBITS) {
289 set_bit(evt_type, sdev->pending_events);
290 schedule_work(&sdev->event_work);
291 }
292}
293
226/** 294/**
227 * scsi_check_sense - Examine scsi cmd sense 295 * scsi_check_sense - Examine scsi cmd sense
228 * @scmd: Cmd to have sense checked. 296 * @scmd: Cmd to have sense checked.
229 * 297 *
230 * Return value: 298 * Return value:
231 * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR 299 * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
232 * 300 *
233 * Notes: 301 * Notes:
234 * When a deferred error is detected the current command has 302 * When a deferred error is detected the current command has
@@ -250,6 +318,8 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
250 */ 318 */
251 return SUCCESS; 319 return SUCCESS;
252 320
321 scsi_report_sense(sdev, &sshdr);
322
253 if (scsi_sense_is_deferred(&sshdr)) 323 if (scsi_sense_is_deferred(&sshdr))
254 return NEEDS_RETRY; 324 return NEEDS_RETRY;
255 325
@@ -315,6 +385,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
315 } 385 }
316 } 386 }
317 /* 387 /*
388 * we might also expect a cc/ua if another LUN on the target
389 * reported a UA with an ASC/ASCQ of 3F 0E -
390 * REPORTED LUNS DATA HAS CHANGED.
391 */
392 if (scmd->device->sdev_target->expecting_lun_change &&
393 sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
394 return NEEDS_RETRY;
395 /*
318 * if the device is in the process of becoming ready, we 396 * if the device is in the process of becoming ready, we
319 * should retry. 397 * should retry.
320 */ 398 */
@@ -327,26 +405,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
327 if (scmd->device->allow_restart && 405 if (scmd->device->allow_restart &&
328 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) 406 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
329 return FAILED; 407 return FAILED;
330
331 if (sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
332 scmd_printk(KERN_WARNING, scmd,
333 "Warning! Received an indication that the "
334 "LUN assignments on this target have "
335 "changed. The Linux SCSI layer does not "
336 "automatically remap LUN assignments.\n");
337 else if (sshdr.asc == 0x3f)
338 scmd_printk(KERN_WARNING, scmd,
339 "Warning! Received an indication that the "
340 "operating parameters on this target have "
341 "changed. The Linux SCSI layer does not "
342 "automatically adjust these parameters.\n");
343
344 if (sshdr.asc == 0x38 && sshdr.ascq == 0x07)
345 scmd_printk(KERN_WARNING, scmd,
346 "Warning! Received an indication that the "
347 "LUN reached a thin provisioning soft "
348 "threshold.\n");
349
350 /* 408 /*
351 * Pass the UA upwards for a determination in the completion 409 * Pass the UA upwards for a determination in the completion
352 * functions. 410 * functions.
@@ -354,18 +412,25 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
354 return SUCCESS; 412 return SUCCESS;
355 413
356 /* these are not supported */ 414 /* these are not supported */
415 case DATA_PROTECT:
416 if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
417 /* Thin provisioning hard threshold reached */
418 set_host_byte(scmd, DID_ALLOC_FAILURE);
419 return SUCCESS;
420 }
357 case COPY_ABORTED: 421 case COPY_ABORTED:
358 case VOLUME_OVERFLOW: 422 case VOLUME_OVERFLOW:
359 case MISCOMPARE: 423 case MISCOMPARE:
360 case BLANK_CHECK: 424 case BLANK_CHECK:
361 case DATA_PROTECT: 425 set_host_byte(scmd, DID_TARGET_FAILURE);
362 return TARGET_ERROR; 426 return SUCCESS;
363 427
364 case MEDIUM_ERROR: 428 case MEDIUM_ERROR:
365 if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */ 429 if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
366 sshdr.asc == 0x13 || /* AMNF DATA FIELD */ 430 sshdr.asc == 0x13 || /* AMNF DATA FIELD */
367 sshdr.asc == 0x14) { /* RECORD NOT FOUND */ 431 sshdr.asc == 0x14) { /* RECORD NOT FOUND */
368 return TARGET_ERROR; 432 set_host_byte(scmd, DID_MEDIUM_ERROR);
433 return SUCCESS;
369 } 434 }
370 return NEEDS_RETRY; 435 return NEEDS_RETRY;
371 436
@@ -373,14 +438,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
373 if (scmd->device->retry_hwerror) 438 if (scmd->device->retry_hwerror)
374 return ADD_TO_MLQUEUE; 439 return ADD_TO_MLQUEUE;
375 else 440 else
376 return TARGET_ERROR; 441 set_host_byte(scmd, DID_TARGET_FAILURE);
377 442
378 case ILLEGAL_REQUEST: 443 case ILLEGAL_REQUEST:
379 if (sshdr.asc == 0x20 || /* Invalid command operation code */ 444 if (sshdr.asc == 0x20 || /* Invalid command operation code */
380 sshdr.asc == 0x21 || /* Logical block address out of range */ 445 sshdr.asc == 0x21 || /* Logical block address out of range */
381 sshdr.asc == 0x24 || /* Invalid field in cdb */ 446 sshdr.asc == 0x24 || /* Invalid field in cdb */
382 sshdr.asc == 0x26) { /* Parameter value invalid */ 447 sshdr.asc == 0x26) { /* Parameter value invalid */
383 return TARGET_ERROR; 448 set_host_byte(scmd, DID_TARGET_FAILURE);
384 } 449 }
385 return SUCCESS; 450 return SUCCESS;
386 451
@@ -843,7 +908,6 @@ retry:
843 case SUCCESS: 908 case SUCCESS:
844 case NEEDS_RETRY: 909 case NEEDS_RETRY:
845 case FAILED: 910 case FAILED:
846 case TARGET_ERROR:
847 break; 911 break;
848 case ADD_TO_MLQUEUE: 912 case ADD_TO_MLQUEUE:
849 rtn = NEEDS_RETRY; 913 rtn = NEEDS_RETRY;
@@ -1568,6 +1632,8 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1568 */ 1632 */
1569 return ADD_TO_MLQUEUE; 1633 return ADD_TO_MLQUEUE;
1570 case GOOD: 1634 case GOOD:
1635 if (scmd->cmnd[0] == REPORT_LUNS)
1636 scmd->device->sdev_target->expecting_lun_change = 0;
1571 scsi_handle_queue_ramp_up(scmd->device); 1637 scsi_handle_queue_ramp_up(scmd->device);
1572 case COMMAND_TERMINATED: 1638 case COMMAND_TERMINATED:
1573 return SUCCESS; 1639 return SUCCESS;
@@ -1577,14 +1643,6 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1577 rtn = scsi_check_sense(scmd); 1643 rtn = scsi_check_sense(scmd);
1578 if (rtn == NEEDS_RETRY) 1644 if (rtn == NEEDS_RETRY)
1579 goto maybe_retry; 1645 goto maybe_retry;
1580 else if (rtn == TARGET_ERROR) {
1581 /*
1582 * Need to modify host byte to signal a
1583 * permanent target failure
1584 */
1585 set_host_byte(scmd, DID_TARGET_FAILURE);
1586 rtn = SUCCESS;
1587 }
1588 /* if rtn == FAILED, we have no sense information; 1646 /* if rtn == FAILED, we have no sense information;
1589 * returning FAILED will wake the error handler thread 1647 * returning FAILED will wake the error handler thread
1590 * to collect the sense and redo the decide 1648 * to collect the sense and redo the decide
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 124392f3091e..d545931c85eb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -716,6 +716,20 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
716} 716}
717EXPORT_SYMBOL(scsi_release_buffers); 717EXPORT_SYMBOL(scsi_release_buffers);
718 718
719/**
720 * __scsi_error_from_host_byte - translate SCSI error code into errno
721 * @cmd: SCSI command (unused)
722 * @result: scsi error code
723 *
724 * Translate SCSI error code into standard UNIX errno.
725 * Return values:
726 * -ENOLINK temporary transport failure
727 * -EREMOTEIO permanent target failure, do not retry
728 * -EBADE permanent nexus failure, retry on other path
729 * -ENOSPC No write space available
730 * -ENODATA Medium error
731 * -EIO unspecified I/O error
732 */
719static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) 733static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
720{ 734{
721 int error = 0; 735 int error = 0;
@@ -732,6 +746,14 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
732 set_host_byte(cmd, DID_OK); 746 set_host_byte(cmd, DID_OK);
733 error = -EBADE; 747 error = -EBADE;
734 break; 748 break;
749 case DID_ALLOC_FAILURE:
750 set_host_byte(cmd, DID_OK);
751 error = -ENOSPC;
752 break;
753 case DID_MEDIUM_ERROR:
754 set_host_byte(cmd, DID_OK);
755 error = -ENODATA;
756 break;
735 default: 757 default:
736 error = -EIO; 758 error = -EIO;
737 break; 759 break;
@@ -2231,7 +2253,21 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2231 case SDEV_EVT_MEDIA_CHANGE: 2253 case SDEV_EVT_MEDIA_CHANGE:
2232 envp[idx++] = "SDEV_MEDIA_CHANGE=1"; 2254 envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2233 break; 2255 break;
2234 2256 case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2257 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
2258 break;
2259 case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2260 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
2261 break;
2262 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2263 envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
2264 break;
2265 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2266 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
2267 break;
2268 case SDEV_EVT_LUN_CHANGE_REPORTED:
2269 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
2270 break;
2235 default: 2271 default:
2236 /* do nothing */ 2272 /* do nothing */
2237 break; 2273 break;
@@ -2252,10 +2288,15 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2252void scsi_evt_thread(struct work_struct *work) 2288void scsi_evt_thread(struct work_struct *work)
2253{ 2289{
2254 struct scsi_device *sdev; 2290 struct scsi_device *sdev;
2291 enum scsi_device_event evt_type;
2255 LIST_HEAD(event_list); 2292 LIST_HEAD(event_list);
2256 2293
2257 sdev = container_of(work, struct scsi_device, event_work); 2294 sdev = container_of(work, struct scsi_device, event_work);
2258 2295
2296 for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
2297 if (test_and_clear_bit(evt_type, sdev->pending_events))
2298 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2299
2259 while (1) { 2300 while (1) {
2260 struct scsi_event *evt; 2301 struct scsi_event *evt;
2261 struct list_head *this, *tmp; 2302 struct list_head *this, *tmp;
@@ -2325,6 +2366,11 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2325 /* evt_type-specific initialization, if any */ 2366 /* evt_type-specific initialization, if any */
2326 switch (evt_type) { 2367 switch (evt_type) {
2327 case SDEV_EVT_MEDIA_CHANGE: 2368 case SDEV_EVT_MEDIA_CHANGE:
2369 case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2370 case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2371 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2372 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2373 case SDEV_EVT_LUN_CHANGE_REPORTED:
2328 default: 2374 default:
2329 /* do nothing */ 2375 /* do nothing */
2330 break; 2376 break;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 7e50061e9ef6..40c639491b27 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -739,6 +739,11 @@ sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
739#define REF_EVT(name) &dev_attr_evt_##name.attr 739#define REF_EVT(name) &dev_attr_evt_##name.attr
740 740
741DECLARE_EVT(media_change, MEDIA_CHANGE) 741DECLARE_EVT(media_change, MEDIA_CHANGE)
742DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED)
743DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED)
744DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
745DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
746DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
742 747
743/* Default template for device attributes. May NOT be modified */ 748/* Default template for device attributes. May NOT be modified */
744static struct attribute *scsi_sdev_attrs[] = { 749static struct attribute *scsi_sdev_attrs[] = {
@@ -759,6 +764,11 @@ static struct attribute *scsi_sdev_attrs[] = {
759 &dev_attr_ioerr_cnt.attr, 764 &dev_attr_ioerr_cnt.attr,
760 &dev_attr_modalias.attr, 765 &dev_attr_modalias.attr,
761 REF_EVT(media_change), 766 REF_EVT(media_change),
767 REF_EVT(inquiry_change_reported),
768 REF_EVT(capacity_change_reported),
769 REF_EVT(soft_threshold_reached),
770 REF_EVT(mode_parameter_change_reported),
771 REF_EVT(lun_change_reported),
762 NULL 772 NULL
763}; 773};
764 774
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index abf7c402e1a5..e4a989fa477d 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -25,7 +25,6 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/bsg-lib.h> 26#include <linux/bsg-lib.h>
27#include <linux/idr.h> 27#include <linux/idr.h>
28#include <linux/list.h>
29#include <net/tcp.h> 28#include <net/tcp.h>
30#include <scsi/scsi.h> 29#include <scsi/scsi.h>
31#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
@@ -3327,6 +3326,23 @@ iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
3327iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); 3326iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
3328iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO); 3327iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
3329iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO); 3328iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
3329iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT);
3330iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN);
3331iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO);
3332iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE);
3333iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT);
3334iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE);
3335iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE);
3336iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE);
3337iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN);
3338iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE);
3339iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS);
3340iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC);
3341iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL);
3342iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6);
3343iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);
3344iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF);
3345
3330 3346
3331#define iscsi_conn_ep_attr_show(param) \ 3347#define iscsi_conn_ep_attr_show(param) \
3332static ssize_t show_conn_ep_param_##param(struct device *dev, \ 3348static ssize_t show_conn_ep_param_##param(struct device *dev, \
@@ -3379,6 +3395,22 @@ static struct attribute *iscsi_conn_attrs[] = {
3379 &dev_attr_conn_persistent_port.attr, 3395 &dev_attr_conn_persistent_port.attr,
3380 &dev_attr_conn_ping_tmo.attr, 3396 &dev_attr_conn_ping_tmo.attr,
3381 &dev_attr_conn_recv_tmo.attr, 3397 &dev_attr_conn_recv_tmo.attr,
3398 &dev_attr_conn_local_port.attr,
3399 &dev_attr_conn_statsn.attr,
3400 &dev_attr_conn_keepalive_tmo.attr,
3401 &dev_attr_conn_max_segment_size.attr,
3402 &dev_attr_conn_tcp_timestamp_stat.attr,
3403 &dev_attr_conn_tcp_wsf_disable.attr,
3404 &dev_attr_conn_tcp_nagle_disable.attr,
3405 &dev_attr_conn_tcp_timer_scale.attr,
3406 &dev_attr_conn_tcp_timestamp_enable.attr,
3407 &dev_attr_conn_fragment_disable.attr,
3408 &dev_attr_conn_ipv4_tos.attr,
3409 &dev_attr_conn_ipv6_traffic_class.attr,
3410 &dev_attr_conn_ipv6_flow_label.attr,
3411 &dev_attr_conn_is_fw_assigned_ipv6.attr,
3412 &dev_attr_conn_tcp_xmit_wsf.attr,
3413 &dev_attr_conn_tcp_recv_wsf.attr,
3382 NULL, 3414 NULL,
3383}; 3415};
3384 3416
@@ -3416,6 +3448,38 @@ static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
3416 param = ISCSI_PARAM_PING_TMO; 3448 param = ISCSI_PARAM_PING_TMO;
3417 else if (attr == &dev_attr_conn_recv_tmo.attr) 3449 else if (attr == &dev_attr_conn_recv_tmo.attr)
3418 param = ISCSI_PARAM_RECV_TMO; 3450 param = ISCSI_PARAM_RECV_TMO;
3451 else if (attr == &dev_attr_conn_local_port.attr)
3452 param = ISCSI_PARAM_LOCAL_PORT;
3453 else if (attr == &dev_attr_conn_statsn.attr)
3454 param = ISCSI_PARAM_STATSN;
3455 else if (attr == &dev_attr_conn_keepalive_tmo.attr)
3456 param = ISCSI_PARAM_KEEPALIVE_TMO;
3457 else if (attr == &dev_attr_conn_max_segment_size.attr)
3458 param = ISCSI_PARAM_MAX_SEGMENT_SIZE;
3459 else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr)
3460 param = ISCSI_PARAM_TCP_TIMESTAMP_STAT;
3461 else if (attr == &dev_attr_conn_tcp_wsf_disable.attr)
3462 param = ISCSI_PARAM_TCP_WSF_DISABLE;
3463 else if (attr == &dev_attr_conn_tcp_nagle_disable.attr)
3464 param = ISCSI_PARAM_TCP_NAGLE_DISABLE;
3465 else if (attr == &dev_attr_conn_tcp_timer_scale.attr)
3466 param = ISCSI_PARAM_TCP_TIMER_SCALE;
3467 else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr)
3468 param = ISCSI_PARAM_TCP_TIMESTAMP_EN;
3469 else if (attr == &dev_attr_conn_fragment_disable.attr)
3470 param = ISCSI_PARAM_IP_FRAGMENT_DISABLE;
3471 else if (attr == &dev_attr_conn_ipv4_tos.attr)
3472 param = ISCSI_PARAM_IPV4_TOS;
3473 else if (attr == &dev_attr_conn_ipv6_traffic_class.attr)
3474 param = ISCSI_PARAM_IPV6_TC;
3475 else if (attr == &dev_attr_conn_ipv6_flow_label.attr)
3476 param = ISCSI_PARAM_IPV6_FLOW_LABEL;
3477 else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr)
3478 param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6;
3479 else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr)
3480 param = ISCSI_PARAM_TCP_XMIT_WSF;
3481 else if (attr == &dev_attr_conn_tcp_recv_wsf.attr)
3482 param = ISCSI_PARAM_TCP_RECV_WSF;
3419 else { 3483 else {
3420 WARN_ONCE(1, "Invalid conn attr"); 3484 WARN_ONCE(1, "Invalid conn attr");
3421 return 0; 3485 return 0;
@@ -3476,6 +3540,21 @@ iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0);
3476iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0); 3540iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0);
3477iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0); 3541iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0);
3478iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0); 3542iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0);
3543iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0);
3544iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0);
3545iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0);
3546iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0);
3547iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0);
3548iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0);
3549iscsi_session_attr(discovery_auth_optional,
3550 ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0);
3551iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0);
3552iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0);
3553iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0);
3554iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0);
3555iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0);
3556iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
3557iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
3479 3558
3480static ssize_t 3559static ssize_t
3481show_priv_session_state(struct device *dev, struct device_attribute *attr, 3560show_priv_session_state(struct device *dev, struct device_attribute *attr,
@@ -3580,6 +3659,20 @@ static struct attribute *iscsi_session_attrs[] = {
3580 &dev_attr_sess_chap_out_idx.attr, 3659 &dev_attr_sess_chap_out_idx.attr,
3581 &dev_attr_sess_chap_in_idx.attr, 3660 &dev_attr_sess_chap_in_idx.attr,
3582 &dev_attr_priv_sess_target_id.attr, 3661 &dev_attr_priv_sess_target_id.attr,
3662 &dev_attr_sess_auto_snd_tgt_disable.attr,
3663 &dev_attr_sess_discovery_session.attr,
3664 &dev_attr_sess_portal_type.attr,
3665 &dev_attr_sess_chap_auth.attr,
3666 &dev_attr_sess_discovery_logout.attr,
3667 &dev_attr_sess_bidi_chap.attr,
3668 &dev_attr_sess_discovery_auth_optional.attr,
3669 &dev_attr_sess_def_time2wait.attr,
3670 &dev_attr_sess_def_time2retain.attr,
3671 &dev_attr_sess_isid.attr,
3672 &dev_attr_sess_tsid.attr,
3673 &dev_attr_sess_def_taskmgmt_tmo.attr,
3674 &dev_attr_sess_discovery_parent_idx.attr,
3675 &dev_attr_sess_discovery_parent_type.attr,
3583 NULL, 3676 NULL,
3584}; 3677};
3585 3678
@@ -3643,6 +3736,34 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
3643 param = ISCSI_PARAM_BOOT_NIC; 3736 param = ISCSI_PARAM_BOOT_NIC;
3644 else if (attr == &dev_attr_sess_boot_target.attr) 3737 else if (attr == &dev_attr_sess_boot_target.attr)
3645 param = ISCSI_PARAM_BOOT_TARGET; 3738 param = ISCSI_PARAM_BOOT_TARGET;
3739 else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr)
3740 param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE;
3741 else if (attr == &dev_attr_sess_discovery_session.attr)
3742 param = ISCSI_PARAM_DISCOVERY_SESS;
3743 else if (attr == &dev_attr_sess_portal_type.attr)
3744 param = ISCSI_PARAM_PORTAL_TYPE;
3745 else if (attr == &dev_attr_sess_chap_auth.attr)
3746 param = ISCSI_PARAM_CHAP_AUTH_EN;
3747 else if (attr == &dev_attr_sess_discovery_logout.attr)
3748 param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN;
3749 else if (attr == &dev_attr_sess_bidi_chap.attr)
3750 param = ISCSI_PARAM_BIDI_CHAP_EN;
3751 else if (attr == &dev_attr_sess_discovery_auth_optional.attr)
3752 param = ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL;
3753 else if (attr == &dev_attr_sess_def_time2wait.attr)
3754 param = ISCSI_PARAM_DEF_TIME2WAIT;
3755 else if (attr == &dev_attr_sess_def_time2retain.attr)
3756 param = ISCSI_PARAM_DEF_TIME2RETAIN;
3757 else if (attr == &dev_attr_sess_isid.attr)
3758 param = ISCSI_PARAM_ISID;
3759 else if (attr == &dev_attr_sess_tsid.attr)
3760 param = ISCSI_PARAM_TSID;
3761 else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr)
3762 param = ISCSI_PARAM_DEF_TASKMGMT_TMO;
3763 else if (attr == &dev_attr_sess_discovery_parent_idx.attr)
3764 param = ISCSI_PARAM_DISCOVERY_PARENT_IDX;
3765 else if (attr == &dev_attr_sess_discovery_parent_type.attr)
3766 param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE;
3646 else if (attr == &dev_attr_priv_sess_recovery_tmo.attr) 3767 else if (attr == &dev_attr_priv_sess_recovery_tmo.attr)
3647 return S_IRUGO | S_IWUSR; 3768 return S_IRUGO | S_IWUSR;
3648 else if (attr == &dev_attr_priv_sess_state.attr) 3769 else if (attr == &dev_attr_priv_sess_state.attr)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 86fcf2c313ad..b58e8f815a00 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -132,8 +132,8 @@ static const char *sd_cache_types[] = {
132}; 132};
133 133
134static ssize_t 134static ssize_t
135sd_store_cache_type(struct device *dev, struct device_attribute *attr, 135cache_type_store(struct device *dev, struct device_attribute *attr,
136 const char *buf, size_t count) 136 const char *buf, size_t count)
137{ 137{
138 int i, ct = -1, rcd, wce, sp; 138 int i, ct = -1, rcd, wce, sp;
139 struct scsi_disk *sdkp = to_scsi_disk(dev); 139 struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -199,8 +199,18 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
199} 199}
200 200
201static ssize_t 201static ssize_t
202sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr, 202manage_start_stop_show(struct device *dev, struct device_attribute *attr,
203 const char *buf, size_t count) 203 char *buf)
204{
205 struct scsi_disk *sdkp = to_scsi_disk(dev);
206 struct scsi_device *sdp = sdkp->device;
207
208 return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
209}
210
211static ssize_t
212manage_start_stop_store(struct device *dev, struct device_attribute *attr,
213 const char *buf, size_t count)
204{ 214{
205 struct scsi_disk *sdkp = to_scsi_disk(dev); 215 struct scsi_disk *sdkp = to_scsi_disk(dev);
206 struct scsi_device *sdp = sdkp->device; 216 struct scsi_device *sdp = sdkp->device;
@@ -212,10 +222,19 @@ sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr,
212 222
213 return count; 223 return count;
214} 224}
225static DEVICE_ATTR_RW(manage_start_stop);
215 226
216static ssize_t 227static ssize_t
217sd_store_allow_restart(struct device *dev, struct device_attribute *attr, 228allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
218 const char *buf, size_t count) 229{
230 struct scsi_disk *sdkp = to_scsi_disk(dev);
231
232 return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
233}
234
235static ssize_t
236allow_restart_store(struct device *dev, struct device_attribute *attr,
237 const char *buf, size_t count)
219{ 238{
220 struct scsi_disk *sdkp = to_scsi_disk(dev); 239 struct scsi_disk *sdkp = to_scsi_disk(dev);
221 struct scsi_device *sdp = sdkp->device; 240 struct scsi_device *sdp = sdkp->device;
@@ -230,47 +249,30 @@ sd_store_allow_restart(struct device *dev, struct device_attribute *attr,
230 249
231 return count; 250 return count;
232} 251}
252static DEVICE_ATTR_RW(allow_restart);
233 253
234static ssize_t 254static ssize_t
235sd_show_cache_type(struct device *dev, struct device_attribute *attr, 255cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
236 char *buf)
237{ 256{
238 struct scsi_disk *sdkp = to_scsi_disk(dev); 257 struct scsi_disk *sdkp = to_scsi_disk(dev);
239 int ct = sdkp->RCD + 2*sdkp->WCE; 258 int ct = sdkp->RCD + 2*sdkp->WCE;
240 259
241 return snprintf(buf, 40, "%s\n", sd_cache_types[ct]); 260 return snprintf(buf, 40, "%s\n", sd_cache_types[ct]);
242} 261}
262static DEVICE_ATTR_RW(cache_type);
243 263
244static ssize_t 264static ssize_t
245sd_show_fua(struct device *dev, struct device_attribute *attr, char *buf) 265FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
246{ 266{
247 struct scsi_disk *sdkp = to_scsi_disk(dev); 267 struct scsi_disk *sdkp = to_scsi_disk(dev);
248 268
249 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA); 269 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
250} 270}
271static DEVICE_ATTR_RO(FUA);
251 272
252static ssize_t 273static ssize_t
253sd_show_manage_start_stop(struct device *dev, struct device_attribute *attr, 274protection_type_show(struct device *dev, struct device_attribute *attr,
254 char *buf) 275 char *buf)
255{
256 struct scsi_disk *sdkp = to_scsi_disk(dev);
257 struct scsi_device *sdp = sdkp->device;
258
259 return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
260}
261
262static ssize_t
263sd_show_allow_restart(struct device *dev, struct device_attribute *attr,
264 char *buf)
265{
266 struct scsi_disk *sdkp = to_scsi_disk(dev);
267
268 return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
269}
270
271static ssize_t
272sd_show_protection_type(struct device *dev, struct device_attribute *attr,
273 char *buf)
274{ 276{
275 struct scsi_disk *sdkp = to_scsi_disk(dev); 277 struct scsi_disk *sdkp = to_scsi_disk(dev);
276 278
@@ -278,8 +280,8 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr,
278} 280}
279 281
280static ssize_t 282static ssize_t
281sd_store_protection_type(struct device *dev, struct device_attribute *attr, 283protection_type_store(struct device *dev, struct device_attribute *attr,
282 const char *buf, size_t count) 284 const char *buf, size_t count)
283{ 285{
284 struct scsi_disk *sdkp = to_scsi_disk(dev); 286 struct scsi_disk *sdkp = to_scsi_disk(dev);
285 unsigned int val; 287 unsigned int val;
@@ -298,10 +300,11 @@ sd_store_protection_type(struct device *dev, struct device_attribute *attr,
298 300
299 return count; 301 return count;
300} 302}
303static DEVICE_ATTR_RW(protection_type);
301 304
302static ssize_t 305static ssize_t
303sd_show_protection_mode(struct device *dev, struct device_attribute *attr, 306protection_mode_show(struct device *dev, struct device_attribute *attr,
304 char *buf) 307 char *buf)
305{ 308{
306 struct scsi_disk *sdkp = to_scsi_disk(dev); 309 struct scsi_disk *sdkp = to_scsi_disk(dev);
307 struct scsi_device *sdp = sdkp->device; 310 struct scsi_device *sdp = sdkp->device;
@@ -320,24 +323,26 @@ sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
320 323
321 return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif); 324 return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
322} 325}
326static DEVICE_ATTR_RO(protection_mode);
323 327
324static ssize_t 328static ssize_t
325sd_show_app_tag_own(struct device *dev, struct device_attribute *attr, 329app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
326 char *buf)
327{ 330{
328 struct scsi_disk *sdkp = to_scsi_disk(dev); 331 struct scsi_disk *sdkp = to_scsi_disk(dev);
329 332
330 return snprintf(buf, 20, "%u\n", sdkp->ATO); 333 return snprintf(buf, 20, "%u\n", sdkp->ATO);
331} 334}
335static DEVICE_ATTR_RO(app_tag_own);
332 336
333static ssize_t 337static ssize_t
334sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr, 338thin_provisioning_show(struct device *dev, struct device_attribute *attr,
335 char *buf) 339 char *buf)
336{ 340{
337 struct scsi_disk *sdkp = to_scsi_disk(dev); 341 struct scsi_disk *sdkp = to_scsi_disk(dev);
338 342
339 return snprintf(buf, 20, "%u\n", sdkp->lbpme); 343 return snprintf(buf, 20, "%u\n", sdkp->lbpme);
340} 344}
345static DEVICE_ATTR_RO(thin_provisioning);
341 346
342static const char *lbp_mode[] = { 347static const char *lbp_mode[] = {
343 [SD_LBP_FULL] = "full", 348 [SD_LBP_FULL] = "full",
@@ -349,8 +354,8 @@ static const char *lbp_mode[] = {
349}; 354};
350 355
351static ssize_t 356static ssize_t
352sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr, 357provisioning_mode_show(struct device *dev, struct device_attribute *attr,
353 char *buf) 358 char *buf)
354{ 359{
355 struct scsi_disk *sdkp = to_scsi_disk(dev); 360 struct scsi_disk *sdkp = to_scsi_disk(dev);
356 361
@@ -358,8 +363,8 @@ sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr,
358} 363}
359 364
360static ssize_t 365static ssize_t
361sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr, 366provisioning_mode_store(struct device *dev, struct device_attribute *attr,
362 const char *buf, size_t count) 367 const char *buf, size_t count)
363{ 368{
364 struct scsi_disk *sdkp = to_scsi_disk(dev); 369 struct scsi_disk *sdkp = to_scsi_disk(dev);
365 struct scsi_device *sdp = sdkp->device; 370 struct scsi_device *sdp = sdkp->device;
@@ -385,10 +390,11 @@ sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
385 390
386 return count; 391 return count;
387} 392}
393static DEVICE_ATTR_RW(provisioning_mode);
388 394
389static ssize_t 395static ssize_t
390sd_show_max_medium_access_timeouts(struct device *dev, 396max_medium_access_timeouts_show(struct device *dev,
391 struct device_attribute *attr, char *buf) 397 struct device_attribute *attr, char *buf)
392{ 398{
393 struct scsi_disk *sdkp = to_scsi_disk(dev); 399 struct scsi_disk *sdkp = to_scsi_disk(dev);
394 400
@@ -396,9 +402,9 @@ sd_show_max_medium_access_timeouts(struct device *dev,
396} 402}
397 403
398static ssize_t 404static ssize_t
399sd_store_max_medium_access_timeouts(struct device *dev, 405max_medium_access_timeouts_store(struct device *dev,
400 struct device_attribute *attr, 406 struct device_attribute *attr, const char *buf,
401 const char *buf, size_t count) 407 size_t count)
402{ 408{
403 struct scsi_disk *sdkp = to_scsi_disk(dev); 409 struct scsi_disk *sdkp = to_scsi_disk(dev);
404 int err; 410 int err;
@@ -410,10 +416,11 @@ sd_store_max_medium_access_timeouts(struct device *dev,
410 416
411 return err ? err : count; 417 return err ? err : count;
412} 418}
419static DEVICE_ATTR_RW(max_medium_access_timeouts);
413 420
414static ssize_t 421static ssize_t
415sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr, 422max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
416 char *buf) 423 char *buf)
417{ 424{
418 struct scsi_disk *sdkp = to_scsi_disk(dev); 425 struct scsi_disk *sdkp = to_scsi_disk(dev);
419 426
@@ -421,8 +428,8 @@ sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr,
421} 428}
422 429
423static ssize_t 430static ssize_t
424sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr, 431max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
425 const char *buf, size_t count) 432 const char *buf, size_t count)
426{ 433{
427 struct scsi_disk *sdkp = to_scsi_disk(dev); 434 struct scsi_disk *sdkp = to_scsi_disk(dev);
428 struct scsi_device *sdp = sdkp->device; 435 struct scsi_device *sdp = sdkp->device;
@@ -451,35 +458,29 @@ sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
451 458
452 return count; 459 return count;
453} 460}
454 461static DEVICE_ATTR_RW(max_write_same_blocks);
455static struct device_attribute sd_disk_attrs[] = { 462
456 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, 463static struct attribute *sd_disk_attrs[] = {
457 sd_store_cache_type), 464 &dev_attr_cache_type.attr,
458 __ATTR(FUA, S_IRUGO, sd_show_fua, NULL), 465 &dev_attr_FUA.attr,
459 __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart, 466 &dev_attr_allow_restart.attr,
460 sd_store_allow_restart), 467 &dev_attr_manage_start_stop.attr,
461 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop, 468 &dev_attr_protection_type.attr,
462 sd_store_manage_start_stop), 469 &dev_attr_protection_mode.attr,
463 __ATTR(protection_type, S_IRUGO|S_IWUSR, sd_show_protection_type, 470 &dev_attr_app_tag_own.attr,
464 sd_store_protection_type), 471 &dev_attr_thin_provisioning.attr,
465 __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL), 472 &dev_attr_provisioning_mode.attr,
466 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), 473 &dev_attr_max_write_same_blocks.attr,
467 __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), 474 &dev_attr_max_medium_access_timeouts.attr,
468 __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode, 475 NULL,
469 sd_store_provisioning_mode),
470 __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR,
471 sd_show_write_same_blocks, sd_store_write_same_blocks),
472 __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR,
473 sd_show_max_medium_access_timeouts,
474 sd_store_max_medium_access_timeouts),
475 __ATTR_NULL,
476}; 476};
477ATTRIBUTE_GROUPS(sd_disk);
477 478
478static struct class sd_disk_class = { 479static struct class sd_disk_class = {
479 .name = "scsi_disk", 480 .name = "scsi_disk",
480 .owner = THIS_MODULE, 481 .owner = THIS_MODULE,
481 .dev_release = scsi_disk_release, 482 .dev_release = scsi_disk_release,
482 .dev_attrs = sd_disk_attrs, 483 .dev_groups = sd_disk_groups,
483}; 484};
484 485
485static const struct dev_pm_ops sd_pm_ops = { 486static const struct dev_pm_ops sd_pm_ops = {
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index df5e961484e1..5cbc4bb1b395 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -105,11 +105,8 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
105static int sg_add(struct device *, struct class_interface *); 105static int sg_add(struct device *, struct class_interface *);
106static void sg_remove(struct device *, struct class_interface *); 106static void sg_remove(struct device *, struct class_interface *);
107 107
108static DEFINE_SPINLOCK(sg_open_exclusive_lock);
109
110static DEFINE_IDR(sg_index_idr); 108static DEFINE_IDR(sg_index_idr);
111static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock 109static DEFINE_RWLOCK(sg_index_lock);
112 file descriptor list for device */
113 110
114static struct class_interface sg_interface = { 111static struct class_interface sg_interface = {
115 .add_dev = sg_add, 112 .add_dev = sg_add,
@@ -146,8 +143,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
146} Sg_request; 143} Sg_request;
147 144
148typedef struct sg_fd { /* holds the state of a file descriptor */ 145typedef struct sg_fd { /* holds the state of a file descriptor */
149 /* sfd_siblings is protected by sg_index_lock */ 146 struct list_head sfd_siblings; /* protected by sfd_lock of device */
150 struct list_head sfd_siblings;
151 struct sg_device *parentdp; /* owning device */ 147 struct sg_device *parentdp; /* owning device */
152 wait_queue_head_t read_wait; /* queue read until command done */ 148 wait_queue_head_t read_wait; /* queue read until command done */
153 rwlock_t rq_list_lock; /* protect access to list in req_arr */ 149 rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -170,13 +166,12 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
170 166
171typedef struct sg_device { /* holds the state of each scsi generic device */ 167typedef struct sg_device { /* holds the state of each scsi generic device */
172 struct scsi_device *device; 168 struct scsi_device *device;
173 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
174 int sg_tablesize; /* adapter's max scatter-gather table size */ 169 int sg_tablesize; /* adapter's max scatter-gather table size */
175 u32 index; /* device index number */ 170 u32 index; /* device index number */
176 /* sfds is protected by sg_index_lock */ 171 spinlock_t sfd_lock; /* protect file descriptor list for device */
177 struct list_head sfds; 172 struct list_head sfds;
173 struct rw_semaphore o_sem; /* exclude open should hold this rwsem */
178 volatile char detached; /* 0->attached, 1->detached pending removal */ 174 volatile char detached; /* 0->attached, 1->detached pending removal */
179 /* exclude protected by sg_open_exclusive_lock */
180 char exclude; /* opened for exclusive access */ 175 char exclude; /* opened for exclusive access */
181 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ 176 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
182 struct gendisk *disk; 177 struct gendisk *disk;
@@ -225,35 +220,14 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
225 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); 220 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
226} 221}
227 222
228static int get_exclude(Sg_device *sdp)
229{
230 unsigned long flags;
231 int ret;
232
233 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
234 ret = sdp->exclude;
235 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
236 return ret;
237}
238
239static int set_exclude(Sg_device *sdp, char val)
240{
241 unsigned long flags;
242
243 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
244 sdp->exclude = val;
245 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
246 return val;
247}
248
249static int sfds_list_empty(Sg_device *sdp) 223static int sfds_list_empty(Sg_device *sdp)
250{ 224{
251 unsigned long flags; 225 unsigned long flags;
252 int ret; 226 int ret;
253 227
254 read_lock_irqsave(&sg_index_lock, flags); 228 spin_lock_irqsave(&sdp->sfd_lock, flags);
255 ret = list_empty(&sdp->sfds); 229 ret = list_empty(&sdp->sfds);
256 read_unlock_irqrestore(&sg_index_lock, flags); 230 spin_unlock_irqrestore(&sdp->sfd_lock, flags);
257 return ret; 231 return ret;
258} 232}
259 233
@@ -265,7 +239,6 @@ sg_open(struct inode *inode, struct file *filp)
265 struct request_queue *q; 239 struct request_queue *q;
266 Sg_device *sdp; 240 Sg_device *sdp;
267 Sg_fd *sfp; 241 Sg_fd *sfp;
268 int res;
269 int retval; 242 int retval;
270 243
271 nonseekable_open(inode, filp); 244 nonseekable_open(inode, filp);
@@ -294,54 +267,52 @@ sg_open(struct inode *inode, struct file *filp)
294 goto error_out; 267 goto error_out;
295 } 268 }
296 269
297 if (flags & O_EXCL) { 270 if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) {
298 if (O_RDONLY == (flags & O_ACCMODE)) { 271 retval = -EPERM; /* Can't lock it with read only access */
299 retval = -EPERM; /* Can't lock it with read only access */
300 goto error_out;
301 }
302 if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
303 retval = -EBUSY;
304 goto error_out;
305 }
306 res = wait_event_interruptible(sdp->o_excl_wait,
307 ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
308 if (res) {
309 retval = res; /* -ERESTARTSYS because signal hit process */
310 goto error_out;
311 }
312 } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
313 if (flags & O_NONBLOCK) {
314 retval = -EBUSY;
315 goto error_out;
316 }
317 res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
318 if (res) {
319 retval = res; /* -ERESTARTSYS because signal hit process */
320 goto error_out;
321 }
322 }
323 if (sdp->detached) {
324 retval = -ENODEV;
325 goto error_out; 272 goto error_out;
326 } 273 }
274 if (flags & O_NONBLOCK) {
275 if (flags & O_EXCL) {
276 if (!down_write_trylock(&sdp->o_sem)) {
277 retval = -EBUSY;
278 goto error_out;
279 }
280 } else {
281 if (!down_read_trylock(&sdp->o_sem)) {
282 retval = -EBUSY;
283 goto error_out;
284 }
285 }
286 } else {
287 if (flags & O_EXCL)
288 down_write(&sdp->o_sem);
289 else
290 down_read(&sdp->o_sem);
291 }
292 /* Since write lock is held, no need to check sfd_list */
293 if (flags & O_EXCL)
294 sdp->exclude = 1; /* used by release lock */
295
327 if (sfds_list_empty(sdp)) { /* no existing opens on this device */ 296 if (sfds_list_empty(sdp)) { /* no existing opens on this device */
328 sdp->sgdebug = 0; 297 sdp->sgdebug = 0;
329 q = sdp->device->request_queue; 298 q = sdp->device->request_queue;
330 sdp->sg_tablesize = queue_max_segments(q); 299 sdp->sg_tablesize = queue_max_segments(q);
331 } 300 }
332 if ((sfp = sg_add_sfp(sdp, dev))) 301 sfp = sg_add_sfp(sdp, dev);
302 if (!IS_ERR(sfp))
333 filp->private_data = sfp; 303 filp->private_data = sfp;
304 /* retval is already provably zero at this point because of the
305 * check after retval = scsi_autopm_get_device(sdp->device))
306 */
334 else { 307 else {
308 retval = PTR_ERR(sfp);
309
335 if (flags & O_EXCL) { 310 if (flags & O_EXCL) {
336 set_exclude(sdp, 0); /* undo if error */ 311 sdp->exclude = 0; /* undo if error */
337 wake_up_interruptible(&sdp->o_excl_wait); 312 up_write(&sdp->o_sem);
338 } 313 } else
339 retval = -ENOMEM; 314 up_read(&sdp->o_sem);
340 goto error_out;
341 }
342 retval = 0;
343error_out: 315error_out:
344 if (retval) {
345 scsi_autopm_put_device(sdp->device); 316 scsi_autopm_put_device(sdp->device);
346sdp_put: 317sdp_put:
347 scsi_device_put(sdp->device); 318 scsi_device_put(sdp->device);
@@ -358,13 +329,18 @@ sg_release(struct inode *inode, struct file *filp)
358{ 329{
359 Sg_device *sdp; 330 Sg_device *sdp;
360 Sg_fd *sfp; 331 Sg_fd *sfp;
332 int excl;
361 333
362 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 334 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
363 return -ENXIO; 335 return -ENXIO;
364 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 336 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
365 337
366 set_exclude(sdp, 0); 338 excl = sdp->exclude;
367 wake_up_interruptible(&sdp->o_excl_wait); 339 sdp->exclude = 0;
340 if (excl)
341 up_write(&sdp->o_sem);
342 else
343 up_read(&sdp->o_sem);
368 344
369 scsi_autopm_put_device(sdp->device); 345 scsi_autopm_put_device(sdp->device);
370 kref_put(&sfp->f_ref, sg_remove_sfp); 346 kref_put(&sfp->f_ref, sg_remove_sfp);
@@ -1415,8 +1391,9 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1415 disk->first_minor = k; 1391 disk->first_minor = k;
1416 sdp->disk = disk; 1392 sdp->disk = disk;
1417 sdp->device = scsidp; 1393 sdp->device = scsidp;
1394 spin_lock_init(&sdp->sfd_lock);
1418 INIT_LIST_HEAD(&sdp->sfds); 1395 INIT_LIST_HEAD(&sdp->sfds);
1419 init_waitqueue_head(&sdp->o_excl_wait); 1396 init_rwsem(&sdp->o_sem);
1420 sdp->sg_tablesize = queue_max_segments(q); 1397 sdp->sg_tablesize = queue_max_segments(q);
1421 sdp->index = k; 1398 sdp->index = k;
1422 kref_init(&sdp->d_ref); 1399 kref_init(&sdp->d_ref);
@@ -1549,11 +1526,13 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
1549 1526
1550 /* Need a write lock to set sdp->detached. */ 1527 /* Need a write lock to set sdp->detached. */
1551 write_lock_irqsave(&sg_index_lock, iflags); 1528 write_lock_irqsave(&sg_index_lock, iflags);
1529 spin_lock(&sdp->sfd_lock);
1552 sdp->detached = 1; 1530 sdp->detached = 1;
1553 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { 1531 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
1554 wake_up_interruptible(&sfp->read_wait); 1532 wake_up_interruptible(&sfp->read_wait);
1555 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); 1533 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
1556 } 1534 }
1535 spin_unlock(&sdp->sfd_lock);
1557 write_unlock_irqrestore(&sg_index_lock, iflags); 1536 write_unlock_irqrestore(&sg_index_lock, iflags);
1558 1537
1559 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); 1538 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@@ -2064,7 +2043,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2064 2043
2065 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); 2044 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2066 if (!sfp) 2045 if (!sfp)
2067 return NULL; 2046 return ERR_PTR(-ENOMEM);
2068 2047
2069 init_waitqueue_head(&sfp->read_wait); 2048 init_waitqueue_head(&sfp->read_wait);
2070 rwlock_init(&sfp->rq_list_lock); 2049 rwlock_init(&sfp->rq_list_lock);
@@ -2078,9 +2057,13 @@ sg_add_sfp(Sg_device * sdp, int dev)
2078 sfp->cmd_q = SG_DEF_COMMAND_Q; 2057 sfp->cmd_q = SG_DEF_COMMAND_Q;
2079 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; 2058 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2080 sfp->parentdp = sdp; 2059 sfp->parentdp = sdp;
2081 write_lock_irqsave(&sg_index_lock, iflags); 2060 spin_lock_irqsave(&sdp->sfd_lock, iflags);
2061 if (sdp->detached) {
2062 spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
2063 return ERR_PTR(-ENODEV);
2064 }
2082 list_add_tail(&sfp->sfd_siblings, &sdp->sfds); 2065 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2083 write_unlock_irqrestore(&sg_index_lock, iflags); 2066 spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
2084 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); 2067 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2085 if (unlikely(sg_big_buff != def_reserved_size)) 2068 if (unlikely(sg_big_buff != def_reserved_size))
2086 sg_big_buff = def_reserved_size; 2069 sg_big_buff = def_reserved_size;
@@ -2130,10 +2113,9 @@ static void sg_remove_sfp(struct kref *kref)
2130 struct sg_device *sdp = sfp->parentdp; 2113 struct sg_device *sdp = sfp->parentdp;
2131 unsigned long iflags; 2114 unsigned long iflags;
2132 2115
2133 write_lock_irqsave(&sg_index_lock, iflags); 2116 spin_lock_irqsave(&sdp->sfd_lock, iflags);
2134 list_del(&sfp->sfd_siblings); 2117 list_del(&sfp->sfd_siblings);
2135 write_unlock_irqrestore(&sg_index_lock, iflags); 2118 spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
2136 wake_up_interruptible(&sdp->o_excl_wait);
2137 2119
2138 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); 2120 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
2139 schedule_work(&sfp->ew.work); 2121 schedule_work(&sfp->ew.work);
@@ -2520,7 +2502,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2520 return 0; 2502 return 0;
2521} 2503}
2522 2504
2523/* must be called while holding sg_index_lock */ 2505/* must be called while holding sg_index_lock and sfd_lock */
2524static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) 2506static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2525{ 2507{
2526 int k, m, new_interface, blen, usg; 2508 int k, m, new_interface, blen, usg;
@@ -2605,22 +2587,26 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2605 2587
2606 read_lock_irqsave(&sg_index_lock, iflags); 2588 read_lock_irqsave(&sg_index_lock, iflags);
2607 sdp = it ? sg_lookup_dev(it->index) : NULL; 2589 sdp = it ? sg_lookup_dev(it->index) : NULL;
2608 if (sdp && !list_empty(&sdp->sfds)) { 2590 if (sdp) {
2609 struct scsi_device *scsidp = sdp->device; 2591 spin_lock(&sdp->sfd_lock);
2592 if (!list_empty(&sdp->sfds)) {
2593 struct scsi_device *scsidp = sdp->device;
2610 2594
2611 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); 2595 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2612 if (sdp->detached) 2596 if (sdp->detached)
2613 seq_printf(s, "detached pending close "); 2597 seq_printf(s, "detached pending close ");
2614 else 2598 else
2615 seq_printf 2599 seq_printf
2616 (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2600 (s, "scsi%d chan=%d id=%d lun=%d em=%d",
2617 scsidp->host->host_no, 2601 scsidp->host->host_no,
2618 scsidp->channel, scsidp->id, 2602 scsidp->channel, scsidp->id,
2619 scsidp->lun, 2603 scsidp->lun,
2620 scsidp->host->hostt->emulated); 2604 scsidp->host->hostt->emulated);
2621 seq_printf(s, " sg_tablesize=%d excl=%d\n", 2605 seq_printf(s, " sg_tablesize=%d excl=%d\n",
2622 sdp->sg_tablesize, get_exclude(sdp)); 2606 sdp->sg_tablesize, sdp->exclude);
2623 sg_proc_debug_helper(s, sdp); 2607 sg_proc_debug_helper(s, sdp);
2608 }
2609 spin_unlock(&sdp->sfd_lock);
2624 } 2610 }
2625 read_unlock_irqrestore(&sg_index_lock, iflags); 2611 read_unlock_irqrestore(&sg_index_lock, iflags);
2626 return 0; 2612 return 0;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 2a32036a9404..ff44b3c2cff2 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -82,7 +82,7 @@ static int try_rdio = 1;
82static int try_wdio = 1; 82static int try_wdio = 1;
83 83
84static struct class st_sysfs_class; 84static struct class st_sysfs_class;
85static struct device_attribute st_dev_attrs[]; 85static const struct attribute_group *st_dev_groups[];
86 86
87MODULE_AUTHOR("Kai Makisara"); 87MODULE_AUTHOR("Kai Makisara");
88MODULE_DESCRIPTION("SCSI tape (st) driver"); 88MODULE_DESCRIPTION("SCSI tape (st) driver");
@@ -4274,7 +4274,7 @@ static void scsi_tape_release(struct kref *kref)
4274 4274
4275static struct class st_sysfs_class = { 4275static struct class st_sysfs_class = {
4276 .name = "scsi_tape", 4276 .name = "scsi_tape",
4277 .dev_attrs = st_dev_attrs, 4277 .dev_groups = st_dev_groups,
4278}; 4278};
4279 4279
4280static int __init init_st(void) 4280static int __init init_st(void)
@@ -4408,6 +4408,7 @@ defined_show(struct device *dev, struct device_attribute *attr, char *buf)
4408 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined); 4408 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
4409 return l; 4409 return l;
4410} 4410}
4411static DEVICE_ATTR_RO(defined);
4411 4412
4412static ssize_t 4413static ssize_t
4413default_blksize_show(struct device *dev, struct device_attribute *attr, 4414default_blksize_show(struct device *dev, struct device_attribute *attr,
@@ -4419,7 +4420,7 @@ default_blksize_show(struct device *dev, struct device_attribute *attr,
4419 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize); 4420 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
4420 return l; 4421 return l;
4421} 4422}
4422 4423static DEVICE_ATTR_RO(default_blksize);
4423 4424
4424static ssize_t 4425static ssize_t
4425default_density_show(struct device *dev, struct device_attribute *attr, 4426default_density_show(struct device *dev, struct device_attribute *attr,
@@ -4433,6 +4434,7 @@ default_density_show(struct device *dev, struct device_attribute *attr,
4433 l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density); 4434 l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density);
4434 return l; 4435 return l;
4435} 4436}
4437static DEVICE_ATTR_RO(default_density);
4436 4438
4437static ssize_t 4439static ssize_t
4438default_compression_show(struct device *dev, struct device_attribute *attr, 4440default_compression_show(struct device *dev, struct device_attribute *attr,
@@ -4444,6 +4446,7 @@ default_compression_show(struct device *dev, struct device_attribute *attr,
4444 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1); 4446 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
4445 return l; 4447 return l;
4446} 4448}
4449static DEVICE_ATTR_RO(default_compression);
4447 4450
4448static ssize_t 4451static ssize_t
4449options_show(struct device *dev, struct device_attribute *attr, char *buf) 4452options_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -4472,15 +4475,17 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf)
4472 l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); 4475 l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
4473 return l; 4476 return l;
4474} 4477}
4475 4478static DEVICE_ATTR_RO(options);
4476static struct device_attribute st_dev_attrs[] = { 4479
4477 __ATTR_RO(defined), 4480static struct attribute *st_dev_attrs[] = {
4478 __ATTR_RO(default_blksize), 4481 &dev_attr_defined.attr,
4479 __ATTR_RO(default_density), 4482 &dev_attr_default_blksize.attr,
4480 __ATTR_RO(default_compression), 4483 &dev_attr_default_density.attr,
4481 __ATTR_RO(options), 4484 &dev_attr_default_compression.attr,
4482 __ATTR_NULL, 4485 &dev_attr_options.attr,
4486 NULL,
4483}; 4487};
4488ATTRIBUTE_GROUPS(st_dev);
4484 4489
4485/* The following functions may be useful for a larger audience. */ 4490/* The following functions may be useful for a larger audience. */
4486static int sgl_map_user_pages(struct st_buffer *STbp, 4491static int sgl_map_user_pages(struct st_buffer *STbp,
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 139bc0647b41..bce09a6898c4 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -36,10 +36,17 @@
36#ifndef _UFS_H 36#ifndef _UFS_H
37#define _UFS_H 37#define _UFS_H
38 38
39#include <linux/mutex.h>
40#include <linux/types.h>
41
39#define MAX_CDB_SIZE 16 42#define MAX_CDB_SIZE 16
43#define GENERAL_UPIU_REQUEST_SIZE 32
44#define QUERY_DESC_MAX_SIZE 256
45#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
46 (sizeof(struct utp_upiu_header)))
40 47
41#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ 48#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
42 ((byte3 << 24) | (byte2 << 16) |\ 49 cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
43 (byte1 << 8) | (byte0)) 50 (byte1 << 8) | (byte0))
44 51
45/* 52/*
@@ -62,7 +69,7 @@ enum {
62 UPIU_TRANSACTION_COMMAND = 0x01, 69 UPIU_TRANSACTION_COMMAND = 0x01,
63 UPIU_TRANSACTION_DATA_OUT = 0x02, 70 UPIU_TRANSACTION_DATA_OUT = 0x02,
64 UPIU_TRANSACTION_TASK_REQ = 0x04, 71 UPIU_TRANSACTION_TASK_REQ = 0x04,
65 UPIU_TRANSACTION_QUERY_REQ = 0x26, 72 UPIU_TRANSACTION_QUERY_REQ = 0x16,
66}; 73};
67 74
68/* UTP UPIU Transaction Codes Target to Initiator */ 75/* UTP UPIU Transaction Codes Target to Initiator */
@@ -73,6 +80,7 @@ enum {
73 UPIU_TRANSACTION_TASK_RSP = 0x24, 80 UPIU_TRANSACTION_TASK_RSP = 0x24,
74 UPIU_TRANSACTION_READY_XFER = 0x31, 81 UPIU_TRANSACTION_READY_XFER = 0x31,
75 UPIU_TRANSACTION_QUERY_RSP = 0x36, 82 UPIU_TRANSACTION_QUERY_RSP = 0x36,
83 UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
76}; 84};
77 85
78/* UPIU Read/Write flags */ 86/* UPIU Read/Write flags */
@@ -90,8 +98,41 @@ enum {
90 UPIU_TASK_ATTR_ACA = 0x03, 98 UPIU_TASK_ATTR_ACA = 0x03,
91}; 99};
92 100
93/* UTP QUERY Transaction Specific Fields OpCode */ 101/* UPIU Query request function */
94enum { 102enum {
103 UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
104 UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
105};
106
107/* Flag idn for Query Requests*/
108enum flag_idn {
109 QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
110 QUERY_FLAG_IDN_BKOPS_EN = 0x04,
111};
112
113/* Attribute idn for Query requests */
114enum attr_idn {
115 QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
116 QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
117 QUERY_ATTR_IDN_EE_STATUS = 0x0E,
118};
119
120/* Exception event mask values */
121enum {
122 MASK_EE_STATUS = 0xFFFF,
123 MASK_EE_URGENT_BKOPS = (1 << 2),
124};
125
126/* Background operation status */
127enum {
128 BKOPS_STATUS_NO_OP = 0x0,
129 BKOPS_STATUS_NON_CRITICAL = 0x1,
130 BKOPS_STATUS_PERF_IMPACT = 0x2,
131 BKOPS_STATUS_CRITICAL = 0x3,
132};
133
134/* UTP QUERY Transaction Specific Fields OpCode */
135enum query_opcode {
95 UPIU_QUERY_OPCODE_NOP = 0x0, 136 UPIU_QUERY_OPCODE_NOP = 0x0,
96 UPIU_QUERY_OPCODE_READ_DESC = 0x1, 137 UPIU_QUERY_OPCODE_READ_DESC = 0x1,
97 UPIU_QUERY_OPCODE_WRITE_DESC = 0x2, 138 UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
@@ -103,6 +144,21 @@ enum {
103 UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8, 144 UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
104}; 145};
105 146
147/* Query response result code */
148enum {
149 QUERY_RESULT_SUCCESS = 0x00,
150 QUERY_RESULT_NOT_READABLE = 0xF6,
151 QUERY_RESULT_NOT_WRITEABLE = 0xF7,
152 QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
153 QUERY_RESULT_INVALID_LENGTH = 0xF9,
154 QUERY_RESULT_INVALID_VALUE = 0xFA,
155 QUERY_RESULT_INVALID_SELECTOR = 0xFB,
156 QUERY_RESULT_INVALID_INDEX = 0xFC,
157 QUERY_RESULT_INVALID_IDN = 0xFD,
158 QUERY_RESULT_INVALID_OPCODE = 0xFE,
159 QUERY_RESULT_GENERAL_FAILURE = 0xFF,
160};
161
106/* UTP Transfer Request Command Type (CT) */ 162/* UTP Transfer Request Command Type (CT) */
107enum { 163enum {
108 UPIU_COMMAND_SET_TYPE_SCSI = 0x0, 164 UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
@@ -110,10 +166,18 @@ enum {
110 UPIU_COMMAND_SET_TYPE_QUERY = 0x2, 166 UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
111}; 167};
112 168
169/* UTP Transfer Request Command Offset */
170#define UPIU_COMMAND_TYPE_OFFSET 28
171
172/* Offset of the response code in the UPIU header */
173#define UPIU_RSP_CODE_OFFSET 8
174
113enum { 175enum {
114 MASK_SCSI_STATUS = 0xFF, 176 MASK_SCSI_STATUS = 0xFF,
115 MASK_TASK_RESPONSE = 0xFF00, 177 MASK_TASK_RESPONSE = 0xFF00,
116 MASK_RSP_UPIU_RESULT = 0xFFFF, 178 MASK_RSP_UPIU_RESULT = 0xFFFF,
179 MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
180 MASK_RSP_EXCEPTION_EVENT = 0x10000,
117}; 181};
118 182
119/* Task management service response */ 183/* Task management service response */
@@ -138,26 +202,59 @@ struct utp_upiu_header {
138 202
139/** 203/**
140 * struct utp_upiu_cmd - Command UPIU structure 204 * struct utp_upiu_cmd - Command UPIU structure
141 * @header: UPIU header structure DW-0 to DW-2
142 * @data_transfer_len: Data Transfer Length DW-3 205 * @data_transfer_len: Data Transfer Length DW-3
143 * @cdb: Command Descriptor Block CDB DW-4 to DW-7 206 * @cdb: Command Descriptor Block CDB DW-4 to DW-7
144 */ 207 */
145struct utp_upiu_cmd { 208struct utp_upiu_cmd {
146 struct utp_upiu_header header;
147 u32 exp_data_transfer_len; 209 u32 exp_data_transfer_len;
148 u8 cdb[MAX_CDB_SIZE]; 210 u8 cdb[MAX_CDB_SIZE];
149}; 211};
150 212
151/** 213/**
152 * struct utp_upiu_rsp - Response UPIU structure 214 * struct utp_upiu_query - upiu request buffer structure for
153 * @header: UPIU header DW-0 to DW-2 215 * query request.
216 * @opcode: command to perform B-0
217 * @idn: a value that indicates the particular type of data B-1
218 * @index: Index to further identify data B-2
219 * @selector: Index to further identify data B-3
220 * @reserved_osf: spec reserved field B-4,5
221 * @length: number of descriptor bytes to read/write B-6,7
222 * @value: Attribute value to be written DW-5
223 * @reserved: spec reserved DW-6,7
224 */
225struct utp_upiu_query {
226 u8 opcode;
227 u8 idn;
228 u8 index;
229 u8 selector;
230 u16 reserved_osf;
231 u16 length;
232 u32 value;
233 u32 reserved[2];
234};
235
236/**
237 * struct utp_upiu_req - general upiu request structure
238 * @header:UPIU header structure DW-0 to DW-2
239 * @sc: fields structure for scsi command DW-3 to DW-7
240 * @qr: fields structure for query request DW-3 to DW-7
241 */
242struct utp_upiu_req {
243 struct utp_upiu_header header;
244 union {
245 struct utp_upiu_cmd sc;
246 struct utp_upiu_query qr;
247 };
248};
249
250/**
251 * struct utp_cmd_rsp - Response UPIU structure
154 * @residual_transfer_count: Residual transfer count DW-3 252 * @residual_transfer_count: Residual transfer count DW-3
155 * @reserved: Reserved double words DW-4 to DW-7 253 * @reserved: Reserved double words DW-4 to DW-7
156 * @sense_data_len: Sense data length DW-8 U16 254 * @sense_data_len: Sense data length DW-8 U16
157 * @sense_data: Sense data field DW-8 to DW-12 255 * @sense_data: Sense data field DW-8 to DW-12
158 */ 256 */
159struct utp_upiu_rsp { 257struct utp_cmd_rsp {
160 struct utp_upiu_header header;
161 u32 residual_transfer_count; 258 u32 residual_transfer_count;
162 u32 reserved[4]; 259 u32 reserved[4];
163 u16 sense_data_len; 260 u16 sense_data_len;
@@ -165,6 +262,20 @@ struct utp_upiu_rsp {
165}; 262};
166 263
167/** 264/**
265 * struct utp_upiu_rsp - general upiu response structure
266 * @header: UPIU header structure DW-0 to DW-2
267 * @sr: fields structure for scsi command DW-3 to DW-12
268 * @qr: fields structure for query request DW-3 to DW-7
269 */
270struct utp_upiu_rsp {
271 struct utp_upiu_header header;
272 union {
273 struct utp_cmd_rsp sr;
274 struct utp_upiu_query qr;
275 };
276};
277
278/**
168 * struct utp_upiu_task_req - Task request UPIU structure 279 * struct utp_upiu_task_req - Task request UPIU structure
169 * @header - UPIU header structure DW0 to DW-2 280 * @header - UPIU header structure DW0 to DW-2
170 * @input_param1: Input parameter 1 DW-3 281 * @input_param1: Input parameter 1 DW-3
@@ -194,4 +305,24 @@ struct utp_upiu_task_rsp {
194 u32 reserved[3]; 305 u32 reserved[3];
195}; 306};
196 307
308/**
309 * struct ufs_query_req - parameters for building a query request
310 * @query_func: UPIU header query function
311 * @upiu_req: the query request data
312 */
313struct ufs_query_req {
314 u8 query_func;
315 struct utp_upiu_query upiu_req;
316};
317
318/**
319 * struct ufs_query_resp - UPIU QUERY
320 * @response: device response code
321 * @upiu_res: query response data
322 */
323struct ufs_query_res {
324 u8 response;
325 struct utp_upiu_query upiu_res;
326};
327
197#endif /* End of Header */ 328#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 48be39a6f6d7..a823cf44e949 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -35,6 +35,7 @@
35 35
36#include "ufshcd.h" 36#include "ufshcd.h"
37#include <linux/pci.h> 37#include <linux/pci.h>
38#include <linux/pm_runtime.h>
38 39
39#ifdef CONFIG_PM 40#ifdef CONFIG_PM
40/** 41/**
@@ -44,7 +45,7 @@
44 * 45 *
45 * Returns -ENOSYS 46 * Returns -ENOSYS
46 */ 47 */
47static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state) 48static int ufshcd_pci_suspend(struct device *dev)
48{ 49{
49 /* 50 /*
50 * TODO: 51 * TODO:
@@ -61,7 +62,7 @@ static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
61 * 62 *
62 * Returns -ENOSYS 63 * Returns -ENOSYS
63 */ 64 */
64static int ufshcd_pci_resume(struct pci_dev *pdev) 65static int ufshcd_pci_resume(struct device *dev)
65{ 66{
66 /* 67 /*
67 * TODO: 68 * TODO:
@@ -71,8 +72,45 @@ static int ufshcd_pci_resume(struct pci_dev *pdev)
71 72
72 return -ENOSYS; 73 return -ENOSYS;
73} 74}
75#else
76#define ufshcd_pci_suspend NULL
77#define ufshcd_pci_resume NULL
74#endif /* CONFIG_PM */ 78#endif /* CONFIG_PM */
75 79
80#ifdef CONFIG_PM_RUNTIME
81static int ufshcd_pci_runtime_suspend(struct device *dev)
82{
83 struct ufs_hba *hba = dev_get_drvdata(dev);
84
85 if (!hba)
86 return 0;
87
88 return ufshcd_runtime_suspend(hba);
89}
90static int ufshcd_pci_runtime_resume(struct device *dev)
91{
92 struct ufs_hba *hba = dev_get_drvdata(dev);
93
94 if (!hba)
95 return 0;
96
97 return ufshcd_runtime_resume(hba);
98}
99static int ufshcd_pci_runtime_idle(struct device *dev)
100{
101 struct ufs_hba *hba = dev_get_drvdata(dev);
102
103 if (!hba)
104 return 0;
105
106 return ufshcd_runtime_idle(hba);
107}
108#else /* !CONFIG_PM_RUNTIME */
109#define ufshcd_pci_runtime_suspend NULL
110#define ufshcd_pci_runtime_resume NULL
111#define ufshcd_pci_runtime_idle NULL
112#endif /* CONFIG_PM_RUNTIME */
113
76/** 114/**
77 * ufshcd_pci_shutdown - main function to put the controller in reset state 115 * ufshcd_pci_shutdown - main function to put the controller in reset state
78 * @pdev: pointer to PCI device handle 116 * @pdev: pointer to PCI device handle
@@ -91,12 +129,10 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
91{ 129{
92 struct ufs_hba *hba = pci_get_drvdata(pdev); 130 struct ufs_hba *hba = pci_get_drvdata(pdev);
93 131
94 disable_irq(pdev->irq); 132 pm_runtime_forbid(&pdev->dev);
133 pm_runtime_get_noresume(&pdev->dev);
95 ufshcd_remove(hba); 134 ufshcd_remove(hba);
96 pci_release_regions(pdev);
97 pci_set_drvdata(pdev, NULL); 135 pci_set_drvdata(pdev, NULL);
98 pci_clear_master(pdev);
99 pci_disable_device(pdev);
100} 136}
101 137
102/** 138/**
@@ -133,55 +169,49 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
133 void __iomem *mmio_base; 169 void __iomem *mmio_base;
134 int err; 170 int err;
135 171
136 err = pci_enable_device(pdev); 172 err = pcim_enable_device(pdev);
137 if (err) { 173 if (err) {
138 dev_err(&pdev->dev, "pci_enable_device failed\n"); 174 dev_err(&pdev->dev, "pcim_enable_device failed\n");
139 goto out_error; 175 return err;
140 } 176 }
141 177
142 pci_set_master(pdev); 178 pci_set_master(pdev);
143 179
144 180 err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
145 err = pci_request_regions(pdev, UFSHCD);
146 if (err < 0) { 181 if (err < 0) {
147 dev_err(&pdev->dev, "request regions failed\n"); 182 dev_err(&pdev->dev, "request and iomap failed\n");
148 goto out_disable; 183 return err;
149 } 184 }
150 185
151 mmio_base = pci_ioremap_bar(pdev, 0); 186 mmio_base = pcim_iomap_table(pdev)[0];
152 if (!mmio_base) {
153 dev_err(&pdev->dev, "memory map failed\n");
154 err = -ENOMEM;
155 goto out_release_regions;
156 }
157 187
158 err = ufshcd_set_dma_mask(pdev); 188 err = ufshcd_set_dma_mask(pdev);
159 if (err) { 189 if (err) {
160 dev_err(&pdev->dev, "set dma mask failed\n"); 190 dev_err(&pdev->dev, "set dma mask failed\n");
161 goto out_iounmap; 191 return err;
162 } 192 }
163 193
164 err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq); 194 err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
165 if (err) { 195 if (err) {
166 dev_err(&pdev->dev, "Initialization failed\n"); 196 dev_err(&pdev->dev, "Initialization failed\n");
167 goto out_iounmap; 197 return err;
168 } 198 }
169 199
170 pci_set_drvdata(pdev, hba); 200 pci_set_drvdata(pdev, hba);
201 pm_runtime_put_noidle(&pdev->dev);
202 pm_runtime_allow(&pdev->dev);
171 203
172 return 0; 204 return 0;
173
174out_iounmap:
175 iounmap(mmio_base);
176out_release_regions:
177 pci_release_regions(pdev);
178out_disable:
179 pci_clear_master(pdev);
180 pci_disable_device(pdev);
181out_error:
182 return err;
183} 205}
184 206
207static const struct dev_pm_ops ufshcd_pci_pm_ops = {
208 .suspend = ufshcd_pci_suspend,
209 .resume = ufshcd_pci_resume,
210 .runtime_suspend = ufshcd_pci_runtime_suspend,
211 .runtime_resume = ufshcd_pci_runtime_resume,
212 .runtime_idle = ufshcd_pci_runtime_idle,
213};
214
185static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = { 215static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
186 { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 216 { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
187 { } /* terminate list */ 217 { } /* terminate list */
@@ -195,10 +225,9 @@ static struct pci_driver ufshcd_pci_driver = {
195 .probe = ufshcd_pci_probe, 225 .probe = ufshcd_pci_probe,
196 .remove = ufshcd_pci_remove, 226 .remove = ufshcd_pci_remove,
197 .shutdown = ufshcd_pci_shutdown, 227 .shutdown = ufshcd_pci_shutdown,
198#ifdef CONFIG_PM 228 .driver = {
199 .suspend = ufshcd_pci_suspend, 229 .pm = &ufshcd_pci_pm_ops
200 .resume = ufshcd_pci_resume, 230 },
201#endif
202}; 231};
203 232
204module_pci_driver(ufshcd_pci_driver); 233module_pci_driver(ufshcd_pci_driver);
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index c42db40d4e51..5e4623225422 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -34,6 +34,7 @@
34 */ 34 */
35 35
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/pm_runtime.h>
37 38
38#include "ufshcd.h" 39#include "ufshcd.h"
39 40
@@ -87,6 +88,40 @@ static int ufshcd_pltfrm_resume(struct device *dev)
87#define ufshcd_pltfrm_resume NULL 88#define ufshcd_pltfrm_resume NULL
88#endif 89#endif
89 90
91#ifdef CONFIG_PM_RUNTIME
92static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
93{
94 struct ufs_hba *hba = dev_get_drvdata(dev);
95
96 if (!hba)
97 return 0;
98
99 return ufshcd_runtime_suspend(hba);
100}
101static int ufshcd_pltfrm_runtime_resume(struct device *dev)
102{
103 struct ufs_hba *hba = dev_get_drvdata(dev);
104
105 if (!hba)
106 return 0;
107
108 return ufshcd_runtime_resume(hba);
109}
110static int ufshcd_pltfrm_runtime_idle(struct device *dev)
111{
112 struct ufs_hba *hba = dev_get_drvdata(dev);
113
114 if (!hba)
115 return 0;
116
117 return ufshcd_runtime_idle(hba);
118}
119#else /* !CONFIG_PM_RUNTIME */
120#define ufshcd_pltfrm_runtime_suspend NULL
121#define ufshcd_pltfrm_runtime_resume NULL
122#define ufshcd_pltfrm_runtime_idle NULL
123#endif /* CONFIG_PM_RUNTIME */
124
90/** 125/**
91 * ufshcd_pltfrm_probe - probe routine of the driver 126 * ufshcd_pltfrm_probe - probe routine of the driver
92 * @pdev: pointer to Platform device handle 127 * @pdev: pointer to Platform device handle
@@ -102,15 +137,8 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev)
102 struct device *dev = &pdev->dev; 137 struct device *dev = &pdev->dev;
103 138
104 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 139 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
105 if (!mem_res) {
106 dev_err(dev, "Memory resource not available\n");
107 err = -ENODEV;
108 goto out;
109 }
110
111 mmio_base = devm_ioremap_resource(dev, mem_res); 140 mmio_base = devm_ioremap_resource(dev, mem_res);
112 if (IS_ERR(mmio_base)) { 141 if (IS_ERR(mmio_base)) {
113 dev_err(dev, "memory map failed\n");
114 err = PTR_ERR(mmio_base); 142 err = PTR_ERR(mmio_base);
115 goto out; 143 goto out;
116 } 144 }
@@ -122,14 +150,22 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev)
122 goto out; 150 goto out;
123 } 151 }
124 152
153 pm_runtime_set_active(&pdev->dev);
154 pm_runtime_enable(&pdev->dev);
155
125 err = ufshcd_init(dev, &hba, mmio_base, irq); 156 err = ufshcd_init(dev, &hba, mmio_base, irq);
126 if (err) { 157 if (err) {
127 dev_err(dev, "Intialization failed\n"); 158 dev_err(dev, "Intialization failed\n");
128 goto out; 159 goto out_disable_rpm;
129 } 160 }
130 161
131 platform_set_drvdata(pdev, hba); 162 platform_set_drvdata(pdev, hba);
132 163
164 return 0;
165
166out_disable_rpm:
167 pm_runtime_disable(&pdev->dev);
168 pm_runtime_set_suspended(&pdev->dev);
133out: 169out:
134 return err; 170 return err;
135} 171}
@@ -144,7 +180,7 @@ static int ufshcd_pltfrm_remove(struct platform_device *pdev)
144{ 180{
145 struct ufs_hba *hba = platform_get_drvdata(pdev); 181 struct ufs_hba *hba = platform_get_drvdata(pdev);
146 182
147 disable_irq(hba->irq); 183 pm_runtime_get_sync(&(pdev)->dev);
148 ufshcd_remove(hba); 184 ufshcd_remove(hba);
149 return 0; 185 return 0;
150} 186}
@@ -157,6 +193,9 @@ static const struct of_device_id ufs_of_match[] = {
157static const struct dev_pm_ops ufshcd_dev_pm_ops = { 193static const struct dev_pm_ops ufshcd_dev_pm_ops = {
158 .suspend = ufshcd_pltfrm_suspend, 194 .suspend = ufshcd_pltfrm_suspend,
159 .resume = ufshcd_pltfrm_resume, 195 .resume = ufshcd_pltfrm_resume,
196 .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
197 .runtime_resume = ufshcd_pltfrm_runtime_resume,
198 .runtime_idle = ufshcd_pltfrm_runtime_idle,
160}; 199};
161 200
162static struct platform_driver ufshcd_pltfrm_driver = { 201static struct platform_driver ufshcd_pltfrm_driver = {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b743bd6fce6b..b36ca9a2dfbb 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -43,6 +43,19 @@
43/* UIC command timeout, unit: ms */ 43/* UIC command timeout, unit: ms */
44#define UIC_CMD_TIMEOUT 500 44#define UIC_CMD_TIMEOUT 500
45 45
46/* NOP OUT retries waiting for NOP IN response */
47#define NOP_OUT_RETRIES 10
48/* Timeout after 30 msecs if NOP OUT hangs without response */
49#define NOP_OUT_TIMEOUT 30 /* msecs */
50
51/* Query request retries */
52#define QUERY_REQ_RETRIES 10
53/* Query request timeout */
54#define QUERY_REQ_TIMEOUT 30 /* msec */
55
56/* Expose the flag value from utp_upiu_query.value */
57#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
58
46enum { 59enum {
47 UFSHCD_MAX_CHANNEL = 0, 60 UFSHCD_MAX_CHANNEL = 0,
48 UFSHCD_MAX_ID = 1, 61 UFSHCD_MAX_ID = 1,
@@ -71,6 +84,40 @@ enum {
71 INT_AGGR_CONFIG, 84 INT_AGGR_CONFIG,
72}; 85};
73 86
87/*
88 * ufshcd_wait_for_register - wait for register value to change
89 * @hba - per-adapter interface
90 * @reg - mmio register offset
91 * @mask - mask to apply to read register value
92 * @val - wait condition
93 * @interval_us - polling interval in microsecs
94 * @timeout_ms - timeout in millisecs
95 *
96 * Returns -ETIMEDOUT on error, zero on success
97 */
98static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
99 u32 val, unsigned long interval_us, unsigned long timeout_ms)
100{
101 int err = 0;
102 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
103
104 /* ignore bits that we don't intend to wait on */
105 val = val & mask;
106
107 while ((ufshcd_readl(hba, reg) & mask) != val) {
108 /* wakeup within 50us of expiry */
109 usleep_range(interval_us, interval_us + 50);
110
111 if (time_after(jiffies, timeout)) {
112 if ((ufshcd_readl(hba, reg) & mask) != val)
113 err = -ETIMEDOUT;
114 break;
115 }
116 }
117
118 return err;
119}
120
74/** 121/**
75 * ufshcd_get_intr_mask - Get the interrupt bit mask 122 * ufshcd_get_intr_mask - Get the interrupt bit mask
76 * @hba - Pointer to adapter instance 123 * @hba - Pointer to adapter instance
@@ -191,18 +238,13 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
191} 238}
192 239
193/** 240/**
194 * ufshcd_is_valid_req_rsp - checks if controller TR response is valid 241 * ufshcd_get_req_rsp - returns the TR response transaction type
195 * @ucd_rsp_ptr: pointer to response UPIU 242 * @ucd_rsp_ptr: pointer to response UPIU
196 *
197 * This function checks the response UPIU for valid transaction type in
198 * response field
199 * Returns 0 on success, non-zero on failure
200 */ 243 */
201static inline int 244static inline int
202ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) 245ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
203{ 246{
204 return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) == 247 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
205 UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
206} 248}
207 249
208/** 250/**
@@ -219,6 +261,21 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
219} 261}
220 262
221/** 263/**
264 * ufshcd_is_exception_event - Check if the device raised an exception event
265 * @ucd_rsp_ptr: pointer to response UPIU
266 *
267 * The function checks if the device raised an exception event indicated in
268 * the Device Information field of response UPIU.
269 *
270 * Returns true if exception is raised, false otherwise.
271 */
272static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
273{
274 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
275 MASK_RSP_EXCEPTION_EVENT ? true : false;
276}
277
278/**
222 * ufshcd_config_int_aggr - Configure interrupt aggregation values. 279 * ufshcd_config_int_aggr - Configure interrupt aggregation values.
223 * Currently there is no use case where we want to configure 280 * Currently there is no use case where we want to configure
224 * interrupt aggregation dynamically. So to configure interrupt 281 * interrupt aggregation dynamically. So to configure interrupt
@@ -299,14 +356,68 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
299{ 356{
300 int len; 357 int len;
301 if (lrbp->sense_buffer) { 358 if (lrbp->sense_buffer) {
302 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len); 359 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
303 memcpy(lrbp->sense_buffer, 360 memcpy(lrbp->sense_buffer,
304 lrbp->ucd_rsp_ptr->sense_data, 361 lrbp->ucd_rsp_ptr->sr.sense_data,
305 min_t(int, len, SCSI_SENSE_BUFFERSIZE)); 362 min_t(int, len, SCSI_SENSE_BUFFERSIZE));
306 } 363 }
307} 364}
308 365
309/** 366/**
367 * ufshcd_query_to_cpu() - formats the buffer to native cpu endian
368 * @response: upiu query response to convert
369 */
370static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response)
371{
372 response->length = be16_to_cpu(response->length);
373 response->value = be32_to_cpu(response->value);
374}
375
376/**
377 * ufshcd_query_to_be() - formats the buffer to big endian
378 * @request: upiu query request to convert
379 */
380static inline void ufshcd_query_to_be(struct utp_upiu_query *request)
381{
382 request->length = cpu_to_be16(request->length);
383 request->value = cpu_to_be32(request->value);
384}
385
386/**
387 * ufshcd_copy_query_response() - Copy the Query Response and the data
388 * descriptor
389 * @hba: per adapter instance
390 * @lrb - pointer to local reference block
391 */
392static
393void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
394{
395 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
396
397 /* Get the UPIU response */
398 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
399 UPIU_RSP_CODE_OFFSET;
400
401 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
402 ufshcd_query_to_cpu(&query_res->upiu_res);
403
404
405 /* Get the descriptor */
406 if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
407 u8 *descp = (u8 *)&lrbp->ucd_rsp_ptr +
408 GENERAL_UPIU_REQUEST_SIZE;
409 u16 len;
410
411 /* data segment length */
412 len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
413 MASK_QUERY_DATA_SEG_LEN;
414
415 memcpy(hba->dev_cmd.query.descriptor, descp,
416 min_t(u16, len, QUERY_DESC_MAX_SIZE));
417 }
418}
419
420/**
310 * ufshcd_hba_capabilities - Read controller capabilities 421 * ufshcd_hba_capabilities - Read controller capabilities
311 * @hba: per adapter instance 422 * @hba: per adapter instance
312 */ 423 */
@@ -519,76 +630,170 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
519} 630}
520 631
521/** 632/**
633 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
634 * descriptor according to request
635 * @lrbp: pointer to local reference block
636 * @upiu_flags: flags required in the header
637 * @cmd_dir: requests data direction
638 */
639static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
640 u32 *upiu_flags, enum dma_data_direction cmd_dir)
641{
642 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
643 u32 data_direction;
644 u32 dword_0;
645
646 if (cmd_dir == DMA_FROM_DEVICE) {
647 data_direction = UTP_DEVICE_TO_HOST;
648 *upiu_flags = UPIU_CMD_FLAGS_READ;
649 } else if (cmd_dir == DMA_TO_DEVICE) {
650 data_direction = UTP_HOST_TO_DEVICE;
651 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
652 } else {
653 data_direction = UTP_NO_DATA_TRANSFER;
654 *upiu_flags = UPIU_CMD_FLAGS_NONE;
655 }
656
657 dword_0 = data_direction | (lrbp->command_type
658 << UPIU_COMMAND_TYPE_OFFSET);
659 if (lrbp->intr_cmd)
660 dword_0 |= UTP_REQ_DESC_INT_CMD;
661
662 /* Transfer request descriptor header fields */
663 req_desc->header.dword_0 = cpu_to_le32(dword_0);
664
665 /*
666 * assigning invalid value for command status. Controller
667 * updates OCS on command completion, with the command
668 * status
669 */
670 req_desc->header.dword_2 =
671 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
672}
673
674/**
675 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
676 * for scsi commands
677 * @lrbp - local reference block pointer
678 * @upiu_flags - flags
679 */
680static
681void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
682{
683 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
684
685 /* command descriptor fields */
686 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
687 UPIU_TRANSACTION_COMMAND, upiu_flags,
688 lrbp->lun, lrbp->task_tag);
689 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
690 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
691
692 /* Total EHS length and Data segment length will be zero */
693 ucd_req_ptr->header.dword_2 = 0;
694
695 ucd_req_ptr->sc.exp_data_transfer_len =
696 cpu_to_be32(lrbp->cmd->sdb.length);
697
698 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
699 (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
700}
701
702/**
703 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
704 * for query requsts
705 * @hba: UFS hba
706 * @lrbp: local reference block pointer
707 * @upiu_flags: flags
708 */
709static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
710 struct ufshcd_lrb *lrbp, u32 upiu_flags)
711{
712 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
713 struct ufs_query *query = &hba->dev_cmd.query;
714 u16 len = query->request.upiu_req.length;
715 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
716
717 /* Query request header */
718 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
719 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
720 lrbp->lun, lrbp->task_tag);
721 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
722 0, query->request.query_func, 0, 0);
723
724 /* Data segment length */
725 ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
726 0, 0, len >> 8, (u8)len);
727
728 /* Copy the Query Request buffer as is */
729 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
730 QUERY_OSF_SIZE);
731 ufshcd_query_to_be(&ucd_req_ptr->qr);
732
733 /* Copy the Descriptor */
734 if ((len > 0) && (query->request.upiu_req.opcode ==
735 UPIU_QUERY_OPCODE_WRITE_DESC)) {
736 memcpy(descp, query->descriptor,
737 min_t(u16, len, QUERY_DESC_MAX_SIZE));
738 }
739}
740
741static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
742{
743 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
744
745 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
746
747 /* command descriptor fields */
748 ucd_req_ptr->header.dword_0 =
749 UPIU_HEADER_DWORD(
750 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
751}
752
753/**
522 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU) 754 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
755 * @hba - per adapter instance
523 * @lrb - pointer to local reference block 756 * @lrb - pointer to local reference block
524 */ 757 */
525static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp) 758static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
526{ 759{
527 struct utp_transfer_req_desc *req_desc;
528 struct utp_upiu_cmd *ucd_cmd_ptr;
529 u32 data_direction;
530 u32 upiu_flags; 760 u32 upiu_flags;
531 761 int ret = 0;
532 ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
533 req_desc = lrbp->utr_descriptor_ptr;
534 762
535 switch (lrbp->command_type) { 763 switch (lrbp->command_type) {
536 case UTP_CMD_TYPE_SCSI: 764 case UTP_CMD_TYPE_SCSI:
537 if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) { 765 if (likely(lrbp->cmd)) {
538 data_direction = UTP_DEVICE_TO_HOST; 766 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
539 upiu_flags = UPIU_CMD_FLAGS_READ; 767 lrbp->cmd->sc_data_direction);
540 } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) { 768 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
541 data_direction = UTP_HOST_TO_DEVICE;
542 upiu_flags = UPIU_CMD_FLAGS_WRITE;
543 } else { 769 } else {
544 data_direction = UTP_NO_DATA_TRANSFER; 770 ret = -EINVAL;
545 upiu_flags = UPIU_CMD_FLAGS_NONE;
546 } 771 }
547
548 /* Transfer request descriptor header fields */
549 req_desc->header.dword_0 =
550 cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
551
552 /*
553 * assigning invalid value for command status. Controller
554 * updates OCS on command completion, with the command
555 * status
556 */
557 req_desc->header.dword_2 =
558 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
559
560 /* command descriptor fields */
561 ucd_cmd_ptr->header.dword_0 =
562 cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
563 upiu_flags,
564 lrbp->lun,
565 lrbp->task_tag));
566 ucd_cmd_ptr->header.dword_1 =
567 cpu_to_be32(
568 UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
569 0,
570 0,
571 0));
572
573 /* Total EHS length and Data segment length will be zero */
574 ucd_cmd_ptr->header.dword_2 = 0;
575
576 ucd_cmd_ptr->exp_data_transfer_len =
577 cpu_to_be32(lrbp->cmd->sdb.length);
578
579 memcpy(ucd_cmd_ptr->cdb,
580 lrbp->cmd->cmnd,
581 (min_t(unsigned short,
582 lrbp->cmd->cmd_len,
583 MAX_CDB_SIZE)));
584 break; 772 break;
585 case UTP_CMD_TYPE_DEV_MANAGE: 773 case UTP_CMD_TYPE_DEV_MANAGE:
586 /* For query function implementation */ 774 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
775 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
776 ufshcd_prepare_utp_query_req_upiu(
777 hba, lrbp, upiu_flags);
778 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
779 ufshcd_prepare_utp_nop_upiu(lrbp);
780 else
781 ret = -EINVAL;
587 break; 782 break;
588 case UTP_CMD_TYPE_UFS: 783 case UTP_CMD_TYPE_UFS:
589 /* For UFS native command implementation */ 784 /* For UFS native command implementation */
785 ret = -ENOTSUPP;
786 dev_err(hba->dev, "%s: UFS native command are not supported\n",
787 __func__);
788 break;
789 default:
790 ret = -ENOTSUPP;
791 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
792 __func__, lrbp->command_type);
590 break; 793 break;
591 } /* end of switch */ 794 } /* end of switch */
795
796 return ret;
592} 797}
593 798
594/** 799/**
@@ -615,21 +820,37 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
615 goto out; 820 goto out;
616 } 821 }
617 822
823 /* acquire the tag to make sure device cmds don't use it */
824 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
825 /*
826 * Dev manage command in progress, requeue the command.
827 * Requeuing the command helps in cases where the request *may*
828 * find different tag instead of waiting for dev manage command
829 * completion.
830 */
831 err = SCSI_MLQUEUE_HOST_BUSY;
832 goto out;
833 }
834
618 lrbp = &hba->lrb[tag]; 835 lrbp = &hba->lrb[tag];
619 836
837 WARN_ON(lrbp->cmd);
620 lrbp->cmd = cmd; 838 lrbp->cmd = cmd;
621 lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE; 839 lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
622 lrbp->sense_buffer = cmd->sense_buffer; 840 lrbp->sense_buffer = cmd->sense_buffer;
623 lrbp->task_tag = tag; 841 lrbp->task_tag = tag;
624 lrbp->lun = cmd->device->lun; 842 lrbp->lun = cmd->device->lun;
625 843 lrbp->intr_cmd = false;
626 lrbp->command_type = UTP_CMD_TYPE_SCSI; 844 lrbp->command_type = UTP_CMD_TYPE_SCSI;
627 845
628 /* form UPIU before issuing the command */ 846 /* form UPIU before issuing the command */
629 ufshcd_compose_upiu(lrbp); 847 ufshcd_compose_upiu(hba, lrbp);
630 err = ufshcd_map_sg(lrbp); 848 err = ufshcd_map_sg(lrbp);
631 if (err) 849 if (err) {
850 lrbp->cmd = NULL;
851 clear_bit_unlock(tag, &hba->lrb_in_use);
632 goto out; 852 goto out;
853 }
633 854
634 /* issue command to the controller */ 855 /* issue command to the controller */
635 spin_lock_irqsave(hba->host->host_lock, flags); 856 spin_lock_irqsave(hba->host->host_lock, flags);
@@ -639,6 +860,338 @@ out:
639 return err; 860 return err;
640} 861}
641 862
863static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
864 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
865{
866 lrbp->cmd = NULL;
867 lrbp->sense_bufflen = 0;
868 lrbp->sense_buffer = NULL;
869 lrbp->task_tag = tag;
870 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
871 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
872 lrbp->intr_cmd = true; /* No interrupt aggregation */
873 hba->dev_cmd.type = cmd_type;
874
875 return ufshcd_compose_upiu(hba, lrbp);
876}
877
878static int
879ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
880{
881 int err = 0;
882 unsigned long flags;
883 u32 mask = 1 << tag;
884
885 /* clear outstanding transaction before retry */
886 spin_lock_irqsave(hba->host->host_lock, flags);
887 ufshcd_utrl_clear(hba, tag);
888 spin_unlock_irqrestore(hba->host->host_lock, flags);
889
890 /*
891 * wait for for h/w to clear corresponding bit in door-bell.
892 * max. wait is 1 sec.
893 */
894 err = ufshcd_wait_for_register(hba,
895 REG_UTP_TRANSFER_REQ_DOOR_BELL,
896 mask, ~mask, 1000, 1000);
897
898 return err;
899}
900
901/**
902 * ufshcd_dev_cmd_completion() - handles device management command responses
903 * @hba: per adapter instance
904 * @lrbp: pointer to local reference block
905 */
906static int
907ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
908{
909 int resp;
910 int err = 0;
911
912 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
913
914 switch (resp) {
915 case UPIU_TRANSACTION_NOP_IN:
916 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
917 err = -EINVAL;
918 dev_err(hba->dev, "%s: unexpected response %x\n",
919 __func__, resp);
920 }
921 break;
922 case UPIU_TRANSACTION_QUERY_RSP:
923 ufshcd_copy_query_response(hba, lrbp);
924 break;
925 case UPIU_TRANSACTION_REJECT_UPIU:
926 /* TODO: handle Reject UPIU Response */
927 err = -EPERM;
928 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
929 __func__);
930 break;
931 default:
932 err = -EINVAL;
933 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
934 __func__, resp);
935 break;
936 }
937
938 return err;
939}
940
941static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
942 struct ufshcd_lrb *lrbp, int max_timeout)
943{
944 int err = 0;
945 unsigned long time_left;
946 unsigned long flags;
947
948 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
949 msecs_to_jiffies(max_timeout));
950
951 spin_lock_irqsave(hba->host->host_lock, flags);
952 hba->dev_cmd.complete = NULL;
953 if (likely(time_left)) {
954 err = ufshcd_get_tr_ocs(lrbp);
955 if (!err)
956 err = ufshcd_dev_cmd_completion(hba, lrbp);
957 }
958 spin_unlock_irqrestore(hba->host->host_lock, flags);
959
960 if (!time_left) {
961 err = -ETIMEDOUT;
962 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
963 /* sucessfully cleared the command, retry if needed */
964 err = -EAGAIN;
965 }
966
967 return err;
968}
969
970/**
971 * ufshcd_get_dev_cmd_tag - Get device management command tag
972 * @hba: per-adapter instance
973 * @tag: pointer to variable with available slot value
974 *
975 * Get a free slot and lock it until device management command
976 * completes.
977 *
978 * Returns false if free slot is unavailable for locking, else
979 * return true with tag value in @tag.
980 */
981static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
982{
983 int tag;
984 bool ret = false;
985 unsigned long tmp;
986
987 if (!tag_out)
988 goto out;
989
990 do {
991 tmp = ~hba->lrb_in_use;
992 tag = find_last_bit(&tmp, hba->nutrs);
993 if (tag >= hba->nutrs)
994 goto out;
995 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
996
997 *tag_out = tag;
998 ret = true;
999out:
1000 return ret;
1001}
1002
1003static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
1004{
1005 clear_bit_unlock(tag, &hba->lrb_in_use);
1006}
1007
1008/**
1009 * ufshcd_exec_dev_cmd - API for sending device management requests
1010 * @hba - UFS hba
1011 * @cmd_type - specifies the type (NOP, Query...)
1012 * @timeout - time in seconds
1013 *
1014 * NOTE: Since there is only one available tag for device management commands,
1015 * it is expected you hold the hba->dev_cmd.lock mutex.
1016 */
1017static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
1018 enum dev_cmd_type cmd_type, int timeout)
1019{
1020 struct ufshcd_lrb *lrbp;
1021 int err;
1022 int tag;
1023 struct completion wait;
1024 unsigned long flags;
1025
1026 /*
1027 * Get free slot, sleep if slots are unavailable.
1028 * Even though we use wait_event() which sleeps indefinitely,
1029 * the maximum wait time is bounded by SCSI request timeout.
1030 */
1031 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
1032
1033 init_completion(&wait);
1034 lrbp = &hba->lrb[tag];
1035 WARN_ON(lrbp->cmd);
1036 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
1037 if (unlikely(err))
1038 goto out_put_tag;
1039
1040 hba->dev_cmd.complete = &wait;
1041
1042 spin_lock_irqsave(hba->host->host_lock, flags);
1043 ufshcd_send_command(hba, tag);
1044 spin_unlock_irqrestore(hba->host->host_lock, flags);
1045
1046 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
1047
1048out_put_tag:
1049 ufshcd_put_dev_cmd_tag(hba, tag);
1050 wake_up(&hba->dev_cmd.tag_wq);
1051 return err;
1052}
1053
1054/**
1055 * ufshcd_query_flag() - API function for sending flag query requests
1056 * hba: per-adapter instance
1057 * query_opcode: flag query to perform
1058 * idn: flag idn to access
1059 * flag_res: the flag value after the query request completes
1060 *
1061 * Returns 0 for success, non-zero in case of failure
1062 */
1063static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
1064 enum flag_idn idn, bool *flag_res)
1065{
1066 struct ufs_query_req *request;
1067 struct ufs_query_res *response;
1068 int err;
1069
1070 BUG_ON(!hba);
1071
1072 mutex_lock(&hba->dev_cmd.lock);
1073 request = &hba->dev_cmd.query.request;
1074 response = &hba->dev_cmd.query.response;
1075 memset(request, 0, sizeof(struct ufs_query_req));
1076 memset(response, 0, sizeof(struct ufs_query_res));
1077
1078 switch (opcode) {
1079 case UPIU_QUERY_OPCODE_SET_FLAG:
1080 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
1081 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1082 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1083 break;
1084 case UPIU_QUERY_OPCODE_READ_FLAG:
1085 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1086 if (!flag_res) {
1087 /* No dummy reads */
1088 dev_err(hba->dev, "%s: Invalid argument for read request\n",
1089 __func__);
1090 err = -EINVAL;
1091 goto out_unlock;
1092 }
1093 break;
1094 default:
1095 dev_err(hba->dev,
1096 "%s: Expected query flag opcode but got = %d\n",
1097 __func__, opcode);
1098 err = -EINVAL;
1099 goto out_unlock;
1100 }
1101 request->upiu_req.opcode = opcode;
1102 request->upiu_req.idn = idn;
1103
1104 /* Send query request */
1105 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
1106 QUERY_REQ_TIMEOUT);
1107
1108 if (err) {
1109 dev_err(hba->dev,
1110 "%s: Sending flag query for idn %d failed, err = %d\n",
1111 __func__, idn, err);
1112 goto out_unlock;
1113 }
1114
1115 if (flag_res)
1116 *flag_res = (response->upiu_res.value &
1117 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
1118
1119out_unlock:
1120 mutex_unlock(&hba->dev_cmd.lock);
1121 return err;
1122}
1123
1124/**
1125 * ufshcd_query_attr - API function for sending attribute requests
1126 * hba: per-adapter instance
1127 * opcode: attribute opcode
1128 * idn: attribute idn to access
1129 * index: index field
1130 * selector: selector field
1131 * attr_val: the attribute value after the query request completes
1132 *
1133 * Returns 0 for success, non-zero in case of failure
1134*/
1135int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
1136 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
1137{
1138 struct ufs_query_req *request;
1139 struct ufs_query_res *response;
1140 int err;
1141
1142 BUG_ON(!hba);
1143
1144 if (!attr_val) {
1145 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
1146 __func__, opcode);
1147 err = -EINVAL;
1148 goto out;
1149 }
1150
1151 mutex_lock(&hba->dev_cmd.lock);
1152 request = &hba->dev_cmd.query.request;
1153 response = &hba->dev_cmd.query.response;
1154 memset(request, 0, sizeof(struct ufs_query_req));
1155 memset(response, 0, sizeof(struct ufs_query_res));
1156
1157 switch (opcode) {
1158 case UPIU_QUERY_OPCODE_WRITE_ATTR:
1159 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1160 request->upiu_req.value = *attr_val;
1161 break;
1162 case UPIU_QUERY_OPCODE_READ_ATTR:
1163 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1164 break;
1165 default:
1166 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
1167 __func__, opcode);
1168 err = -EINVAL;
1169 goto out_unlock;
1170 }
1171
1172 request->upiu_req.opcode = opcode;
1173 request->upiu_req.idn = idn;
1174 request->upiu_req.index = index;
1175 request->upiu_req.selector = selector;
1176
1177 /* Send query request */
1178 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
1179 QUERY_REQ_TIMEOUT);
1180
1181 if (err) {
1182 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
1183 __func__, opcode, idn, err);
1184 goto out_unlock;
1185 }
1186
1187 *attr_val = response->upiu_res.value;
1188
1189out_unlock:
1190 mutex_unlock(&hba->dev_cmd.lock);
1191out:
1192 return err;
1193}
1194
642/** 1195/**
643 * ufshcd_memory_alloc - allocate memory for host memory space data structures 1196 * ufshcd_memory_alloc - allocate memory for host memory space data structures
644 * @hba: per adapter instance 1197 * @hba: per adapter instance
@@ -774,8 +1327,8 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
774 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); 1327 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
775 1328
776 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); 1329 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
777 hba->lrb[i].ucd_cmd_ptr = 1330 hba->lrb[i].ucd_req_ptr =
778 (struct utp_upiu_cmd *)(cmd_descp + i); 1331 (struct utp_upiu_req *)(cmd_descp + i);
779 hba->lrb[i].ucd_rsp_ptr = 1332 hba->lrb[i].ucd_rsp_ptr =
780 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; 1333 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
781 hba->lrb[i].ucd_prdt_ptr = 1334 hba->lrb[i].ucd_prdt_ptr =
@@ -809,6 +1362,57 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
809} 1362}
810 1363
811/** 1364/**
1365 * ufshcd_complete_dev_init() - checks device readiness
1366 * hba: per-adapter instance
1367 *
1368 * Set fDeviceInit flag and poll until device toggles it.
1369 */
1370static int ufshcd_complete_dev_init(struct ufs_hba *hba)
1371{
1372 int i, retries, err = 0;
1373 bool flag_res = 1;
1374
1375 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1376 /* Set the fDeviceInit flag */
1377 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1378 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
1379 if (!err || err == -ETIMEDOUT)
1380 break;
1381 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1382 }
1383 if (err) {
1384 dev_err(hba->dev,
1385 "%s setting fDeviceInit flag failed with error %d\n",
1386 __func__, err);
1387 goto out;
1388 }
1389
1390 /* poll for max. 100 iterations for fDeviceInit flag to clear */
1391 for (i = 0; i < 100 && !err && flag_res; i++) {
1392 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1393 err = ufshcd_query_flag(hba,
1394 UPIU_QUERY_OPCODE_READ_FLAG,
1395 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
1396 if (!err || err == -ETIMEDOUT)
1397 break;
1398 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
1399 err);
1400 }
1401 }
1402 if (err)
1403 dev_err(hba->dev,
1404 "%s reading fDeviceInit flag failed with error %d\n",
1405 __func__, err);
1406 else if (flag_res)
1407 dev_err(hba->dev,
1408 "%s fDeviceInit was not cleared by the device\n",
1409 __func__);
1410
1411out:
1412 return err;
1413}
1414
1415/**
812 * ufshcd_make_hba_operational - Make UFS controller operational 1416 * ufshcd_make_hba_operational - Make UFS controller operational
813 * @hba: per adapter instance 1417 * @hba: per adapter instance
814 * 1418 *
@@ -961,6 +1565,38 @@ out:
961} 1565}
962 1566
963/** 1567/**
1568 * ufshcd_verify_dev_init() - Verify device initialization
1569 * @hba: per-adapter instance
1570 *
1571 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
1572 * device Transport Protocol (UTP) layer is ready after a reset.
1573 * If the UTP layer at the device side is not initialized, it may
1574 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
1575 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
1576 */
1577static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1578{
1579 int err = 0;
1580 int retries;
1581
1582 mutex_lock(&hba->dev_cmd.lock);
1583 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
1584 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1585 NOP_OUT_TIMEOUT);
1586
1587 if (!err || err == -ETIMEDOUT)
1588 break;
1589
1590 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1591 }
1592 mutex_unlock(&hba->dev_cmd.lock);
1593
1594 if (err)
1595 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
1596 return err;
1597}
1598
1599/**
964 * ufshcd_do_reset - reset the host controller 1600 * ufshcd_do_reset - reset the host controller
965 * @hba: per adapter instance 1601 * @hba: per adapter instance
966 * 1602 *
@@ -986,13 +1622,20 @@ static int ufshcd_do_reset(struct ufs_hba *hba)
986 for (tag = 0; tag < hba->nutrs; tag++) { 1622 for (tag = 0; tag < hba->nutrs; tag++) {
987 if (test_bit(tag, &hba->outstanding_reqs)) { 1623 if (test_bit(tag, &hba->outstanding_reqs)) {
988 lrbp = &hba->lrb[tag]; 1624 lrbp = &hba->lrb[tag];
989 scsi_dma_unmap(lrbp->cmd); 1625 if (lrbp->cmd) {
990 lrbp->cmd->result = DID_RESET << 16; 1626 scsi_dma_unmap(lrbp->cmd);
991 lrbp->cmd->scsi_done(lrbp->cmd); 1627 lrbp->cmd->result = DID_RESET << 16;
992 lrbp->cmd = NULL; 1628 lrbp->cmd->scsi_done(lrbp->cmd);
1629 lrbp->cmd = NULL;
1630 clear_bit_unlock(tag, &hba->lrb_in_use);
1631 }
993 } 1632 }
994 } 1633 }
995 1634
1635 /* complete device management command */
1636 if (hba->dev_cmd.complete)
1637 complete(hba->dev_cmd.complete);
1638
996 /* clear outstanding request/task bit maps */ 1639 /* clear outstanding request/task bit maps */
997 hba->outstanding_reqs = 0; 1640 hba->outstanding_reqs = 0;
998 hba->outstanding_tasks = 0; 1641 hba->outstanding_tasks = 0;
@@ -1199,27 +1842,39 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1199 1842
1200 switch (ocs) { 1843 switch (ocs) {
1201 case OCS_SUCCESS: 1844 case OCS_SUCCESS:
1845 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1202 1846
1203 /* check if the returned transfer response is valid */ 1847 switch (result) {
1204 result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr); 1848 case UPIU_TRANSACTION_RESPONSE:
1205 if (result) { 1849 /*
1850 * get the response UPIU result to extract
1851 * the SCSI command status
1852 */
1853 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
1854
1855 /*
1856 * get the result based on SCSI status response
1857 * to notify the SCSI midlayer of the command status
1858 */
1859 scsi_status = result & MASK_SCSI_STATUS;
1860 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
1861
1862 if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
1863 schedule_work(&hba->eeh_work);
1864 break;
1865 case UPIU_TRANSACTION_REJECT_UPIU:
1866 /* TODO: handle Reject UPIU Response */
1867 result = DID_ERROR << 16;
1868 dev_err(hba->dev,
1869 "Reject UPIU not fully implemented\n");
1870 break;
1871 default:
1872 result = DID_ERROR << 16;
1206 dev_err(hba->dev, 1873 dev_err(hba->dev,
1207 "Invalid response = %x\n", result); 1874 "Unexpected request response code = %x\n",
1875 result);
1208 break; 1876 break;
1209 } 1877 }
1210
1211 /*
1212 * get the response UPIU result to extract
1213 * the SCSI command status
1214 */
1215 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
1216
1217 /*
1218 * get the result based on SCSI status response
1219 * to notify the SCSI midlayer of the command status
1220 */
1221 scsi_status = result & MASK_SCSI_STATUS;
1222 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
1223 break; 1878 break;
1224 case OCS_ABORTED: 1879 case OCS_ABORTED:
1225 result |= DID_ABORT << 16; 1880 result |= DID_ABORT << 16;
@@ -1259,28 +1914,40 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
1259 */ 1914 */
1260static void ufshcd_transfer_req_compl(struct ufs_hba *hba) 1915static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
1261{ 1916{
1262 struct ufshcd_lrb *lrb; 1917 struct ufshcd_lrb *lrbp;
1918 struct scsi_cmnd *cmd;
1263 unsigned long completed_reqs; 1919 unsigned long completed_reqs;
1264 u32 tr_doorbell; 1920 u32 tr_doorbell;
1265 int result; 1921 int result;
1266 int index; 1922 int index;
1923 bool int_aggr_reset = false;
1267 1924
1268 lrb = hba->lrb;
1269 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 1925 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1270 completed_reqs = tr_doorbell ^ hba->outstanding_reqs; 1926 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
1271 1927
1272 for (index = 0; index < hba->nutrs; index++) { 1928 for (index = 0; index < hba->nutrs; index++) {
1273 if (test_bit(index, &completed_reqs)) { 1929 if (test_bit(index, &completed_reqs)) {
1930 lrbp = &hba->lrb[index];
1931 cmd = lrbp->cmd;
1932 /*
1933 * Don't skip resetting interrupt aggregation counters
1934 * if a regular command is present.
1935 */
1936 int_aggr_reset |= !lrbp->intr_cmd;
1274 1937
1275 result = ufshcd_transfer_rsp_status(hba, &lrb[index]); 1938 if (cmd) {
1276 1939 result = ufshcd_transfer_rsp_status(hba, lrbp);
1277 if (lrb[index].cmd) { 1940 scsi_dma_unmap(cmd);
1278 scsi_dma_unmap(lrb[index].cmd); 1941 cmd->result = result;
1279 lrb[index].cmd->result = result;
1280 lrb[index].cmd->scsi_done(lrb[index].cmd);
1281
1282 /* Mark completed command as NULL in LRB */ 1942 /* Mark completed command as NULL in LRB */
1283 lrb[index].cmd = NULL; 1943 lrbp->cmd = NULL;
1944 clear_bit_unlock(index, &hba->lrb_in_use);
1945 /* Do not touch lrbp after scsi done */
1946 cmd->scsi_done(cmd);
1947 } else if (lrbp->command_type ==
1948 UTP_CMD_TYPE_DEV_MANAGE) {
1949 if (hba->dev_cmd.complete)
1950 complete(hba->dev_cmd.complete);
1284 } 1951 }
1285 } /* end of if */ 1952 } /* end of if */
1286 } /* end of for */ 1953 } /* end of for */
@@ -1288,8 +1955,238 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
1288 /* clear corresponding bits of completed commands */ 1955 /* clear corresponding bits of completed commands */
1289 hba->outstanding_reqs ^= completed_reqs; 1956 hba->outstanding_reqs ^= completed_reqs;
1290 1957
1958 /* we might have free'd some tags above */
1959 wake_up(&hba->dev_cmd.tag_wq);
1960
1291 /* Reset interrupt aggregation counters */ 1961 /* Reset interrupt aggregation counters */
1292 ufshcd_config_int_aggr(hba, INT_AGGR_RESET); 1962 if (int_aggr_reset)
1963 ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
1964}
1965
1966/**
1967 * ufshcd_disable_ee - disable exception event
1968 * @hba: per-adapter instance
1969 * @mask: exception event to disable
1970 *
1971 * Disables exception event in the device so that the EVENT_ALERT
1972 * bit is not set.
1973 *
1974 * Returns zero on success, non-zero error value on failure.
1975 */
1976static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
1977{
1978 int err = 0;
1979 u32 val;
1980
1981 if (!(hba->ee_ctrl_mask & mask))
1982 goto out;
1983
1984 val = hba->ee_ctrl_mask & ~mask;
1985 val &= 0xFFFF; /* 2 bytes */
1986 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
1987 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
1988 if (!err)
1989 hba->ee_ctrl_mask &= ~mask;
1990out:
1991 return err;
1992}
1993
1994/**
1995 * ufshcd_enable_ee - enable exception event
1996 * @hba: per-adapter instance
1997 * @mask: exception event to enable
1998 *
1999 * Enable corresponding exception event in the device to allow
2000 * device to alert host in critical scenarios.
2001 *
2002 * Returns zero on success, non-zero error value on failure.
2003 */
2004static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
2005{
2006 int err = 0;
2007 u32 val;
2008
2009 if (hba->ee_ctrl_mask & mask)
2010 goto out;
2011
2012 val = hba->ee_ctrl_mask | mask;
2013 val &= 0xFFFF; /* 2 bytes */
2014 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
2015 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
2016 if (!err)
2017 hba->ee_ctrl_mask |= mask;
2018out:
2019 return err;
2020}
2021
2022/**
2023 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
2024 * @hba: per-adapter instance
2025 *
2026 * Allow device to manage background operations on its own. Enabling
2027 * this might lead to inconsistent latencies during normal data transfers
2028 * as the device is allowed to manage its own way of handling background
2029 * operations.
2030 *
2031 * Returns zero on success, non-zero on failure.
2032 */
2033static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
2034{
2035 int err = 0;
2036
2037 if (hba->auto_bkops_enabled)
2038 goto out;
2039
2040 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2041 QUERY_FLAG_IDN_BKOPS_EN, NULL);
2042 if (err) {
2043 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
2044 __func__, err);
2045 goto out;
2046 }
2047
2048 hba->auto_bkops_enabled = true;
2049
2050 /* No need of URGENT_BKOPS exception from the device */
2051 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
2052 if (err)
2053 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
2054 __func__, err);
2055out:
2056 return err;
2057}
2058
2059/**
2060 * ufshcd_disable_auto_bkops - block device in doing background operations
2061 * @hba: per-adapter instance
2062 *
2063 * Disabling background operations improves command response latency but
2064 * has drawback of device moving into critical state where the device is
2065 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
2066 * host is idle so that BKOPS are managed effectively without any negative
2067 * impacts.
2068 *
2069 * Returns zero on success, non-zero on failure.
2070 */
2071static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
2072{
2073 int err = 0;
2074
2075 if (!hba->auto_bkops_enabled)
2076 goto out;
2077
2078 /*
2079 * If host assisted BKOPs is to be enabled, make sure
2080 * urgent bkops exception is allowed.
2081 */
2082 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
2083 if (err) {
2084 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
2085 __func__, err);
2086 goto out;
2087 }
2088
2089 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
2090 QUERY_FLAG_IDN_BKOPS_EN, NULL);
2091 if (err) {
2092 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
2093 __func__, err);
2094 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
2095 goto out;
2096 }
2097
2098 hba->auto_bkops_enabled = false;
2099out:
2100 return err;
2101}
2102
2103/**
2104 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
2105 * @hba: per adapter instance
2106 *
2107 * After a device reset the device may toggle the BKOPS_EN flag
2108 * to default value. The s/w tracking variables should be updated
2109 * as well. Do this by forcing enable of auto bkops.
2110 */
2111static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
2112{
2113 hba->auto_bkops_enabled = false;
2114 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
2115 ufshcd_enable_auto_bkops(hba);
2116}
2117
2118static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
2119{
2120 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2121 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
2122}
2123
2124/**
2125 * ufshcd_urgent_bkops - handle urgent bkops exception event
2126 * @hba: per-adapter instance
2127 *
2128 * Enable fBackgroundOpsEn flag in the device to permit background
2129 * operations.
2130 */
2131static int ufshcd_urgent_bkops(struct ufs_hba *hba)
2132{
2133 int err;
2134 u32 status = 0;
2135
2136 err = ufshcd_get_bkops_status(hba, &status);
2137 if (err) {
2138 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
2139 __func__, err);
2140 goto out;
2141 }
2142
2143 status = status & 0xF;
2144
2145 /* handle only if status indicates performance impact or critical */
2146 if (status >= BKOPS_STATUS_PERF_IMPACT)
2147 err = ufshcd_enable_auto_bkops(hba);
2148out:
2149 return err;
2150}
2151
2152static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
2153{
2154 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2155 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
2156}
2157
2158/**
2159 * ufshcd_exception_event_handler - handle exceptions raised by device
2160 * @work: pointer to work data
2161 *
2162 * Read bExceptionEventStatus attribute from the device and handle the
2163 * exception event accordingly.
2164 */
2165static void ufshcd_exception_event_handler(struct work_struct *work)
2166{
2167 struct ufs_hba *hba;
2168 int err;
2169 u32 status = 0;
2170 hba = container_of(work, struct ufs_hba, eeh_work);
2171
2172 pm_runtime_get_sync(hba->dev);
2173 err = ufshcd_get_ee_status(hba, &status);
2174 if (err) {
2175 dev_err(hba->dev, "%s: failed to get exception status %d\n",
2176 __func__, err);
2177 goto out;
2178 }
2179
2180 status &= hba->ee_ctrl_mask;
2181 if (status & MASK_EE_URGENT_BKOPS) {
2182 err = ufshcd_urgent_bkops(hba);
2183 if (err)
2184 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
2185 __func__, err);
2186 }
2187out:
2188 pm_runtime_put_sync(hba->dev);
2189 return;
1293} 2190}
1294 2191
1295/** 2192/**
@@ -1301,9 +2198,11 @@ static void ufshcd_fatal_err_handler(struct work_struct *work)
1301 struct ufs_hba *hba; 2198 struct ufs_hba *hba;
1302 hba = container_of(work, struct ufs_hba, feh_workq); 2199 hba = container_of(work, struct ufs_hba, feh_workq);
1303 2200
2201 pm_runtime_get_sync(hba->dev);
1304 /* check if reset is already in progress */ 2202 /* check if reset is already in progress */
1305 if (hba->ufshcd_state != UFSHCD_STATE_RESET) 2203 if (hba->ufshcd_state != UFSHCD_STATE_RESET)
1306 ufshcd_do_reset(hba); 2204 ufshcd_do_reset(hba);
2205 pm_runtime_put_sync(hba->dev);
1307} 2206}
1308 2207
1309/** 2208/**
@@ -1432,10 +2331,10 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
1432 task_req_upiup = 2331 task_req_upiup =
1433 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; 2332 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
1434 task_req_upiup->header.dword_0 = 2333 task_req_upiup->header.dword_0 =
1435 cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, 2334 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
1436 lrbp->lun, lrbp->task_tag)); 2335 lrbp->lun, lrbp->task_tag);
1437 task_req_upiup->header.dword_1 = 2336 task_req_upiup->header.dword_1 =
1438 cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0)); 2337 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
1439 2338
1440 task_req_upiup->input_param1 = lrbp->lun; 2339 task_req_upiup->input_param1 = lrbp->lun;
1441 task_req_upiup->input_param1 = 2340 task_req_upiup->input_param1 =
@@ -1502,9 +2401,11 @@ static int ufshcd_device_reset(struct scsi_cmnd *cmd)
1502 if (hba->lrb[pos].cmd) { 2401 if (hba->lrb[pos].cmd) {
1503 scsi_dma_unmap(hba->lrb[pos].cmd); 2402 scsi_dma_unmap(hba->lrb[pos].cmd);
1504 hba->lrb[pos].cmd->result = 2403 hba->lrb[pos].cmd->result =
1505 DID_ABORT << 16; 2404 DID_ABORT << 16;
1506 hba->lrb[pos].cmd->scsi_done(cmd); 2405 hba->lrb[pos].cmd->scsi_done(cmd);
1507 hba->lrb[pos].cmd = NULL; 2406 hba->lrb[pos].cmd = NULL;
2407 clear_bit_unlock(pos, &hba->lrb_in_use);
2408 wake_up(&hba->dev_cmd.tag_wq);
1508 } 2409 }
1509 } 2410 }
1510 } /* end of for */ 2411 } /* end of for */
@@ -1572,6 +2473,9 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
1572 __clear_bit(tag, &hba->outstanding_reqs); 2473 __clear_bit(tag, &hba->outstanding_reqs);
1573 hba->lrb[tag].cmd = NULL; 2474 hba->lrb[tag].cmd = NULL;
1574 spin_unlock_irqrestore(host->host_lock, flags); 2475 spin_unlock_irqrestore(host->host_lock, flags);
2476
2477 clear_bit_unlock(tag, &hba->lrb_in_use);
2478 wake_up(&hba->dev_cmd.tag_wq);
1575out: 2479out:
1576 return err; 2480 return err;
1577} 2481}
@@ -1587,8 +2491,22 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
1587 int ret; 2491 int ret;
1588 2492
1589 ret = ufshcd_link_startup(hba); 2493 ret = ufshcd_link_startup(hba);
1590 if (!ret) 2494 if (ret)
1591 scsi_scan_host(hba->host); 2495 goto out;
2496
2497 ret = ufshcd_verify_dev_init(hba);
2498 if (ret)
2499 goto out;
2500
2501 ret = ufshcd_complete_dev_init(hba);
2502 if (ret)
2503 goto out;
2504
2505 ufshcd_force_reset_auto_bkops(hba);
2506 scsi_scan_host(hba->host);
2507 pm_runtime_put_sync(hba->dev);
2508out:
2509 return;
1592} 2510}
1593 2511
1594static struct scsi_host_template ufshcd_driver_template = { 2512static struct scsi_host_template ufshcd_driver_template = {
@@ -1650,6 +2568,34 @@ int ufshcd_resume(struct ufs_hba *hba)
1650} 2568}
1651EXPORT_SYMBOL_GPL(ufshcd_resume); 2569EXPORT_SYMBOL_GPL(ufshcd_resume);
1652 2570
2571int ufshcd_runtime_suspend(struct ufs_hba *hba)
2572{
2573 if (!hba)
2574 return 0;
2575
2576 /*
2577 * The device is idle with no requests in the queue,
2578 * allow background operations.
2579 */
2580 return ufshcd_enable_auto_bkops(hba);
2581}
2582EXPORT_SYMBOL(ufshcd_runtime_suspend);
2583
2584int ufshcd_runtime_resume(struct ufs_hba *hba)
2585{
2586 if (!hba)
2587 return 0;
2588
2589 return ufshcd_disable_auto_bkops(hba);
2590}
2591EXPORT_SYMBOL(ufshcd_runtime_resume);
2592
2593int ufshcd_runtime_idle(struct ufs_hba *hba)
2594{
2595 return 0;
2596}
2597EXPORT_SYMBOL(ufshcd_runtime_idle);
2598
1653/** 2599/**
1654 * ufshcd_remove - de-allocate SCSI host and host memory space 2600 * ufshcd_remove - de-allocate SCSI host and host memory space
1655 * data structure memory 2601 * data structure memory
@@ -1657,11 +2603,11 @@ EXPORT_SYMBOL_GPL(ufshcd_resume);
1657 */ 2603 */
1658void ufshcd_remove(struct ufs_hba *hba) 2604void ufshcd_remove(struct ufs_hba *hba)
1659{ 2605{
2606 scsi_remove_host(hba->host);
1660 /* disable interrupts */ 2607 /* disable interrupts */
1661 ufshcd_disable_intr(hba, hba->intr_mask); 2608 ufshcd_disable_intr(hba, hba->intr_mask);
1662 ufshcd_hba_stop(hba); 2609 ufshcd_hba_stop(hba);
1663 2610
1664 scsi_remove_host(hba->host);
1665 scsi_host_put(hba->host); 2611 scsi_host_put(hba->host);
1666} 2612}
1667EXPORT_SYMBOL_GPL(ufshcd_remove); 2613EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -1740,10 +2686,17 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
1740 2686
1741 /* Initialize work queues */ 2687 /* Initialize work queues */
1742 INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); 2688 INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
2689 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
1743 2690
1744 /* Initialize UIC command mutex */ 2691 /* Initialize UIC command mutex */
1745 mutex_init(&hba->uic_cmd_mutex); 2692 mutex_init(&hba->uic_cmd_mutex);
1746 2693
2694 /* Initialize mutex for device management commands */
2695 mutex_init(&hba->dev_cmd.lock);
2696
2697 /* Initialize device management tag acquire wait queue */
2698 init_waitqueue_head(&hba->dev_cmd.tag_wq);
2699
1747 /* IRQ registration */ 2700 /* IRQ registration */
1748 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); 2701 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
1749 if (err) { 2702 if (err) {
@@ -1773,6 +2726,9 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
1773 2726
1774 *hba_handle = hba; 2727 *hba_handle = hba;
1775 2728
2729 /* Hold auto suspend until async scan completes */
2730 pm_runtime_get_sync(dev);
2731
1776 async_schedule(ufshcd_async_scan, hba); 2732 async_schedule(ufshcd_async_scan, hba);
1777 2733
1778 return 0; 2734 return 0;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 49590ee07acc..59c9c4848be1 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -68,6 +68,11 @@
68#define UFSHCD "ufshcd" 68#define UFSHCD "ufshcd"
69#define UFSHCD_DRIVER_VERSION "0.2" 69#define UFSHCD_DRIVER_VERSION "0.2"
70 70
71enum dev_cmd_type {
72 DEV_CMD_TYPE_NOP = 0x0,
73 DEV_CMD_TYPE_QUERY = 0x1,
74};
75
71/** 76/**
72 * struct uic_command - UIC command structure 77 * struct uic_command - UIC command structure
73 * @command: UIC command 78 * @command: UIC command
@@ -91,7 +96,7 @@ struct uic_command {
91/** 96/**
92 * struct ufshcd_lrb - local reference block 97 * struct ufshcd_lrb - local reference block
93 * @utr_descriptor_ptr: UTRD address of the command 98 * @utr_descriptor_ptr: UTRD address of the command
94 * @ucd_cmd_ptr: UCD address of the command 99 * @ucd_req_ptr: UCD address of the command
95 * @ucd_rsp_ptr: Response UPIU address for this command 100 * @ucd_rsp_ptr: Response UPIU address for this command
96 * @ucd_prdt_ptr: PRDT address of the command 101 * @ucd_prdt_ptr: PRDT address of the command
97 * @cmd: pointer to SCSI command 102 * @cmd: pointer to SCSI command
@@ -101,10 +106,11 @@ struct uic_command {
101 * @command_type: SCSI, UFS, Query. 106 * @command_type: SCSI, UFS, Query.
102 * @task_tag: Task tag of the command 107 * @task_tag: Task tag of the command
103 * @lun: LUN of the command 108 * @lun: LUN of the command
109 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
104 */ 110 */
105struct ufshcd_lrb { 111struct ufshcd_lrb {
106 struct utp_transfer_req_desc *utr_descriptor_ptr; 112 struct utp_transfer_req_desc *utr_descriptor_ptr;
107 struct utp_upiu_cmd *ucd_cmd_ptr; 113 struct utp_upiu_req *ucd_req_ptr;
108 struct utp_upiu_rsp *ucd_rsp_ptr; 114 struct utp_upiu_rsp *ucd_rsp_ptr;
109 struct ufshcd_sg_entry *ucd_prdt_ptr; 115 struct ufshcd_sg_entry *ucd_prdt_ptr;
110 116
@@ -116,8 +122,35 @@ struct ufshcd_lrb {
116 int command_type; 122 int command_type;
117 int task_tag; 123 int task_tag;
118 unsigned int lun; 124 unsigned int lun;
125 bool intr_cmd;
119}; 126};
120 127
128/**
129 * struct ufs_query - holds relevent data structures for query request
130 * @request: request upiu and function
131 * @descriptor: buffer for sending/receiving descriptor
132 * @response: response upiu and response
133 */
134struct ufs_query {
135 struct ufs_query_req request;
136 u8 *descriptor;
137 struct ufs_query_res response;
138};
139
140/**
141 * struct ufs_dev_cmd - all assosiated fields with device management commands
142 * @type: device management command type - Query, NOP OUT
143 * @lock: lock to allow one command at a time
144 * @complete: internal commands completion
145 * @tag_wq: wait queue until free command slot is available
146 */
147struct ufs_dev_cmd {
148 enum dev_cmd_type type;
149 struct mutex lock;
150 struct completion *complete;
151 wait_queue_head_t tag_wq;
152 struct ufs_query query;
153};
121 154
122/** 155/**
123 * struct ufs_hba - per adapter private structure 156 * struct ufs_hba - per adapter private structure
@@ -131,6 +164,7 @@ struct ufshcd_lrb {
131 * @host: Scsi_Host instance of the driver 164 * @host: Scsi_Host instance of the driver
132 * @dev: device handle 165 * @dev: device handle
133 * @lrb: local reference block 166 * @lrb: local reference block
167 * @lrb_in_use: lrb in use
134 * @outstanding_tasks: Bits representing outstanding task requests 168 * @outstanding_tasks: Bits representing outstanding task requests
135 * @outstanding_reqs: Bits representing outstanding transfer requests 169 * @outstanding_reqs: Bits representing outstanding transfer requests
136 * @capabilities: UFS Controller Capabilities 170 * @capabilities: UFS Controller Capabilities
@@ -144,8 +178,12 @@ struct ufshcd_lrb {
144 * @tm_condition: condition variable for task management 178 * @tm_condition: condition variable for task management
145 * @ufshcd_state: UFSHCD states 179 * @ufshcd_state: UFSHCD states
146 * @intr_mask: Interrupt Mask Bits 180 * @intr_mask: Interrupt Mask Bits
181 * @ee_ctrl_mask: Exception event control mask
147 * @feh_workq: Work queue for fatal controller error handling 182 * @feh_workq: Work queue for fatal controller error handling
183 * @eeh_work: Worker to handle exception events
148 * @errors: HBA errors 184 * @errors: HBA errors
185 * @dev_cmd: ufs device management command information
186 * @auto_bkops_enabled: to track whether bkops is enabled in device
149 */ 187 */
150struct ufs_hba { 188struct ufs_hba {
151 void __iomem *mmio_base; 189 void __iomem *mmio_base;
@@ -164,6 +202,7 @@ struct ufs_hba {
164 struct device *dev; 202 struct device *dev;
165 203
166 struct ufshcd_lrb *lrb; 204 struct ufshcd_lrb *lrb;
205 unsigned long lrb_in_use;
167 206
168 unsigned long outstanding_tasks; 207 unsigned long outstanding_tasks;
169 unsigned long outstanding_reqs; 208 unsigned long outstanding_reqs;
@@ -182,12 +221,19 @@ struct ufs_hba {
182 221
183 u32 ufshcd_state; 222 u32 ufshcd_state;
184 u32 intr_mask; 223 u32 intr_mask;
224 u16 ee_ctrl_mask;
185 225
186 /* Work Queues */ 226 /* Work Queues */
187 struct work_struct feh_workq; 227 struct work_struct feh_workq;
228 struct work_struct eeh_work;
188 229
189 /* HBA Errors */ 230 /* HBA Errors */
190 u32 errors; 231 u32 errors;
232
233 /* Device management request data */
234 struct ufs_dev_cmd dev_cmd;
235
236 bool auto_bkops_enabled;
191}; 237};
192 238
193#define ufshcd_writel(hba, val, reg) \ 239#define ufshcd_writel(hba, val, reg) \
@@ -208,4 +254,13 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
208 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); 254 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
209} 255}
210 256
257static inline void check_upiu_size(void)
258{
259 BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
260 GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
261}
262
263extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
264extern int ufshcd_runtime_resume(struct ufs_hba *hba);
265extern int ufshcd_runtime_idle(struct ufs_hba *hba);
211#endif /* End of Header */ 266#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index d5c5f1482d7d..f1e1b7459107 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -39,7 +39,7 @@
39enum { 39enum {
40 TASK_REQ_UPIU_SIZE_DWORDS = 8, 40 TASK_REQ_UPIU_SIZE_DWORDS = 8,
41 TASK_RSP_UPIU_SIZE_DWORDS = 8, 41 TASK_RSP_UPIU_SIZE_DWORDS = 8,
42 ALIGNED_UPIU_SIZE = 128, 42 ALIGNED_UPIU_SIZE = 512,
43}; 43};
44 44
45/* UFSHCI Registers */ 45/* UFSHCI Registers */