author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 20:53:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 20:53:36 -0400
commit		39695224bd84dc4be29abad93a0ec232a16fc519 (patch)
tree		2bfa5cb50788a4c8be9f2e9f4412e47a565f4508 /drivers
parent		a9bbd210a44102cc50b30a5f3d111dbf5f2f9cd4 (diff)
parent		ea038f63ac52439e7816295fa6064fe95e6c1f51 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (209 commits)
  [SCSI] fix oops during scsi scanning
  [SCSI] libsrp: fix memory leak in srp_ring_free()
  [SCSI] libiscsi, bnx2i: make bound ep check common
  [SCSI] libiscsi: add completion function for drivers that do not need pdu processing
  [SCSI] scsi_dh_rdac: changes for rdac debug logging
  [SCSI] scsi_dh_rdac: changes to collect the rdac debug information during the initialization
  [SCSI] scsi_dh_rdac: move the init code from rdac_activate to rdac_bus_attach
  [SCSI] sg: fix oops in the error path in sg_build_indirect()
  [SCSI] mptsas : Bump version to 3.04.12
  [SCSI] mptsas : FW event thread and scsi mid layer deadlock in SYNCHRONIZE CACHE command
  [SCSI] mptsas : Send DID_NO_CONNECT for pending IOs of removed device
  [SCSI] mptsas : PAE Kernel more than 4 GB kernel panic
  [SCSI] mptsas : NULL pointer on big endian systems causing Expander not to tear off
  [SCSI] mptsas : Sanity check for phyinfo is added
  [SCSI] scsi_dh_rdac: Add support for Sun StorageTek ST2500, ST2510 and ST2530
  [SCSI] pmcraid: PMC-Sierra MaxRAID driver to support 6Gb/s SAS RAID controller
  [SCSI] qla2xxx: Update version number to 8.03.01-k6.
  [SCSI] qla2xxx: Properly delete rports attached to a vport.
  [SCSI] qla2xxx: Correct various NPIV issues.
  [SCSI] qla2xxx: Correct qla2x00_eh_wait_on_command() to wait correctly.
  ...
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/md/dm-mpath.c | 42
-rw-r--r-- drivers/message/fusion/mptbase.c | 94
-rw-r--r-- drivers/message/fusion/mptbase.h | 21
-rw-r--r-- drivers/message/fusion/mptfc.c | 19
-rw-r--r-- drivers/message/fusion/mptsas.c | 62
-rw-r--r-- drivers/message/fusion/mptscsih.c | 67
-rw-r--r-- drivers/message/fusion/mptscsih.h | 1
-rw-r--r-- drivers/message/fusion/mptspi.c | 21
-rw-r--r-- drivers/misc/enclosure.c | 69
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 288
-rw-r--r-- drivers/s390/scsi/zfcp_ccw.c | 94
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 544
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.h | 175
-rw-r--r-- drivers/s390/scsi/zfcp_def.h | 183
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 155
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 102
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 176
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 635
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.h | 3
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.c | 369
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 73
-rw-r--r-- drivers/s390/scsi/zfcp_sysfs.c | 34
-rw-r--r-- drivers/scsi/Kconfig | 6
-rw-r--r-- drivers/scsi/Makefile | 1
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_init.c | 100
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 13
-rw-r--r-- drivers/scsi/ch.c | 6
-rw-r--r-- drivers/scsi/constants.c | 95
-rw-r--r-- drivers/scsi/device_handler/scsi_dh.c | 56
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 2
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_emc.c | 59
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_rdac.c | 116
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 1078
-rw-r--r-- drivers/scsi/fcoe/fcoe.h | 36
-rw-r--r-- drivers/scsi/fcoe/libfcoe.c | 30
-rw-r--r-- drivers/scsi/fnic/fnic_fcs.c | 2
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 20
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r-- drivers/scsi/ipr.h | 2
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 31
-rw-r--r-- drivers/scsi/libfc/fc_disc.c | 523
-rw-r--r-- drivers/scsi/libfc/fc_elsct.c | 49
-rw-r--r-- drivers/scsi/libfc/fc_exch.c | 515
-rw-r--r-- drivers/scsi/libfc/fc_fcp.c | 31
-rw-r--r-- drivers/scsi/libfc/fc_lport.c | 283
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 1144
-rw-r--r-- drivers/scsi/libiscsi.c | 201
-rw-r--r-- drivers/scsi/libsrp.c | 1
-rw-r--r-- drivers/scsi/lpfc/Makefile | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 19
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 904
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 18
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 259
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 74
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 134
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 93
-rw-r--r-- drivers/scsi/lpfc/lpfc_mem.c | 41
-rw-r--r-- drivers/scsi/lpfc/lpfc_nl.h | 20
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 7
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 263
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 5
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c | 53
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.c | 96
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.h | 51
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_config.c | 904
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 16
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 252
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_transport.c | 33
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.c | 2
-rw-r--r-- drivers/scsi/pmcraid.c | 5604
-rw-r--r-- drivers/scsi/pmcraid.h | 1029
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 9
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 39
-rw-r--r-- drivers/scsi/qla2xxx/qla_fw.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 20
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 239
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 206
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 309
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 7
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 27
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 141
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_isr.c | 8
-rw-r--r-- drivers/scsi/scsi.c | 13
-rw-r--r-- drivers/scsi/scsi_error.c | 6
-rw-r--r-- drivers/scsi/scsi_lib.c | 1
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 4
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 73
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r-- drivers/scsi/sd.c | 2
-rw-r--r-- drivers/scsi/ses.c | 209
-rw-r--r-- drivers/scsi/sg.c | 2
-rw-r--r-- drivers/scsi/stex.c | 33
99 files changed, 14174 insertions, 4714 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6f0d90d4a541..32d0b878eccc 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -64,6 +64,7 @@ struct multipath {
 	spinlock_t lock;
 
 	const char *hw_handler_name;
+	char *hw_handler_params;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -219,6 +220,7 @@ static void free_multipath(struct multipath *m)
 	}
 
 	kfree(m->hw_handler_name);
+	kfree(m->hw_handler_params);
 	mempool_destroy(m->mpio_pool);
 	kfree(m);
 }
@@ -615,6 +617,17 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 			dm_put_device(ti, p->path.dev);
 			goto bad;
 		}
+
+		if (m->hw_handler_params) {
+			r = scsi_dh_set_params(q, m->hw_handler_params);
+			if (r < 0) {
+				ti->error = "unable to set hardware "
+							"handler parameters";
+				scsi_dh_detach(q);
+				dm_put_device(ti, p->path.dev);
+				goto bad;
+			}
+		}
 	}
 
 	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
@@ -705,6 +718,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 {
 	unsigned hw_argc;
+	int ret;
 	struct dm_target *ti = m->ti;
 
 	static struct param _params[] = {
@@ -726,17 +740,33 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 	request_module("scsi_dh_%s", m->hw_handler_name);
 	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
 		ti->error = "unknown hardware handler type";
-		kfree(m->hw_handler_name);
-		m->hw_handler_name = NULL;
-		return -EINVAL;
+		ret = -EINVAL;
+		goto fail;
 	}
 
-	if (hw_argc > 1)
-		DMWARN("Ignoring user-specified arguments for "
-		       "hardware handler \"%s\"", m->hw_handler_name);
+	if (hw_argc > 1) {
+		char *p;
+		int i, j, len = 4;
+
+		for (i = 0; i <= hw_argc - 2; i++)
+			len += strlen(as->argv[i]) + 1;
+		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
+		if (!p) {
+			ti->error = "memory allocation failed";
+			ret = -ENOMEM;
+			goto fail;
+		}
+		j = sprintf(p, "%d", hw_argc - 1);
+		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
+			j = sprintf(p, "%s", as->argv[i]);
+	}
 	consume(as, hw_argc - 1);
 
 	return 0;
+fail:
+	kfree(m->hw_handler_name);
+	m->hw_handler_name = NULL;
+	return ret;
 }
 
 static int parse_features(struct arg_set *as, struct multipath *m)
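
Editor's note: the parse_hw_handler() change above stores the handler arguments as one buffer holding a decimal argument count followed by each argument, all NUL-separated, so scsi_dh_set_params() can unpack them later. A minimal user-space sketch of the same packing scheme (pack_params and the libc calls are illustrative, not kernel code):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Pack argc plus NUL-separated arguments into one buffer,
	 * mirroring the kzalloc/sprintf loop added to parse_hw_handler(). */
	static char *pack_params(int argc, char **argv)
	{
		int i, j, len = 4;		/* room for the count prefix */
		char *buf, *p;

		for (i = 0; i < argc; i++)
			len += strlen(argv[i]) + 1;
		p = buf = calloc(1, len);
		if (!buf)
			return NULL;
		j = sprintf(p, "%d", argc);
		for (i = 0, p += j + 1; i < argc; i++, p += j + 1)
			j = sprintf(p, "%s", argv[i]);
		return buf;			/* e.g. "2\0arg1\0arg2" */
	}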
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5d0ba4f5924c..76fa2ee0b574 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1015,9 +1015,9 @@ mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
 {
 	SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
 	pSge->Address.Low = cpu_to_le32
-			(lower_32_bits((unsigned long)(dma_addr)));
+			(lower_32_bits(dma_addr));
 	pSge->Address.High = cpu_to_le32
-			(upper_32_bits((unsigned long)dma_addr));
+			(upper_32_bits(dma_addr));
 	pSge->FlagsLength = cpu_to_le32
 			((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
 }
@@ -1038,8 +1038,8 @@ mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
 	u32 tmp;
 
 	pSge->Address.Low = cpu_to_le32
-			(lower_32_bits((unsigned long)(dma_addr)));
-	tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
+			(lower_32_bits(dma_addr));
+	tmp = (u32)(upper_32_bits(dma_addr));
 
 	/*
 	 * 1078 errata workaround for the 36GB limitation
@@ -1101,7 +1101,7 @@ mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
 	pChain->NextChainOffset = next;
 
 	pChain->Address.Low = cpu_to_le32(tmp);
-	tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
+	tmp = (u32)(upper_32_bits(dma_addr));
 	pChain->Address.High = cpu_to_le32(tmp);
 }
 
@@ -1297,12 +1297,8 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
 	psge = (char *)&ioc_init->HostPageBufferSGE;
 	flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
 	    MPI_SGE_FLAGS_SYSTEM_ADDRESS |
-	    MPI_SGE_FLAGS_32_BIT_ADDRESSING |
 	    MPI_SGE_FLAGS_HOST_TO_IOC |
 	    MPI_SGE_FLAGS_END_OF_BUFFER;
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
-		flags_length |= MPI_SGE_FLAGS_64_BIT_ADDRESSING;
-	}
 	flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
 	flags_length |= ioc->HostPageBuffer_sz;
 	ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
@@ -2224,8 +2220,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 	int	 hard;
 	int	 rc=0;
 	int	 ii;
-	u8	 cb_idx;
-	int	 handlers;
 	int	 ret = 0;
 	int	 reset_alt_ioc_active = 0;
 	int	 irq_allocated = 0;
@@ -2548,34 +2542,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 		mpt_get_manufacturing_pg_0(ioc);
 	}
 
-	/*
-	 *  Call each currently registered protocol IOC reset handler
-	 *    with post-reset indication.
-	 *  NOTE: If we're doing _IOC_BRINGUP, there can be no
-	 *    MptResetHandlers[] registered yet.
-	 */
-	if (hard_reset_done) {
-		rc = handlers = 0;
-		for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
-			if ((ret == 0) && MptResetHandlers[cb_idx]) {
-				dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-				    "Calling IOC post_reset handler #%d\n",
-				    ioc->name, cb_idx));
-				rc += mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET);
-				handlers++;
-			}
-
-			if (alt_ioc_ready && MptResetHandlers[cb_idx]) {
-				drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-				    "Calling IOC post_reset handler #%d\n",
-				    ioc->alt_ioc->name, cb_idx));
-				rc += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_POST_RESET);
-				handlers++;
-			}
-		}
-		/* FIXME?  Examine results here? */
-	}
-
  out:
 	if ((ret != 0) && irq_allocated) {
 		free_irq(ioc->pci_irq, ioc);
@@ -3938,6 +3904,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
 	int count = 0;
 	u32 diag1val = 0;
 	MpiFwHeader_t *cached_fw;	/* Pointer to FW */
+	u8	 cb_idx;
 
 	/* Clear any existing interrupts */
 	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
@@ -3956,6 +3923,18 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
 	else
 		mdelay(1);
 
+	/*
+	 * Call each currently registered protocol IOC reset handler
+	 * with pre-reset indication.
+	 * NOTE: If we're doing _IOC_BRINGUP, there can be no
+	 * MptResetHandlers[] registered yet.
+	 */
+	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+		if (MptResetHandlers[cb_idx])
+			(*(MptResetHandlers[cb_idx]))(ioc,
+					MPT_IOC_PRE_RESET);
+	}
+
 	for (count = 0; count < 60; count ++) {
 		doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
 		doorbell &= MPI_IOC_STATE_MASK;
@@ -4052,25 +4031,15 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
 	 * NOTE: If we're doing _IOC_BRINGUP, there can be no
 	 *	MptResetHandlers[] registered yet.
 	 */
-	{
-		u8	 cb_idx;
-		int	 r = 0;
-
-		for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
-			if (MptResetHandlers[cb_idx]) {
-				dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-					"Calling IOC pre_reset handler #%d\n",
-					ioc->name, cb_idx));
-				r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
-				if (ioc->alt_ioc) {
-					dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-						"Calling alt-%s pre_reset handler #%d\n",
-						ioc->name, ioc->alt_ioc->name, cb_idx));
-					r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_PRE_RESET);
-				}
+	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+		if (MptResetHandlers[cb_idx]) {
+			mpt_signal_reset(cb_idx,
+				ioc, MPT_IOC_PRE_RESET);
+			if (ioc->alt_ioc) {
+				mpt_signal_reset(cb_idx,
+					ioc->alt_ioc, MPT_IOC_PRE_RESET);
 			}
 		}
-		/* FIXME?  Examine results here? */
 	}
 
 	if (ioc->cached_fw)
@@ -6956,7 +6925,7 @@ EXPORT_SYMBOL(mpt_halt_firmware);
 int
 mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 {
-	int	 rc;
+	int	rc;
 	u8	 cb_idx;
 	unsigned long	 flags;
 	unsigned long	 time_count;
@@ -6982,8 +6951,6 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 		ioc->alt_ioc->ioc_reset_in_progress = 1;
 	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
-	/* FIXME: If do_ioc_recovery fails, repeat....
-	 */
 
 	/* The SCSI driver needs to adjust timeouts on all current
 	 * commands prior to the diagnostic reset being issued.
@@ -7020,6 +6987,15 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 	}
 	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
+	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+		if (MptResetHandlers[cb_idx]) {
+			mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET);
+			if (ioc->alt_ioc)
+				mpt_signal_reset(cb_idx,
+					ioc->alt_ioc, MPT_IOC_POST_RESET);
+		}
+	}
+
 	dtmprintk(ioc,
 	    printk(MYIOC_s_DEBUG_FMT
 		"HardResetHandler: completed (%d seconds): %s\n", ioc->name,
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 1c8514dc31ca..8dd4d219e433 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.10"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.09"
+#define MPT_LINUX_VERSION_COMMON	"3.04.12"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.12"
 #define WHAT_MAGIC_STRING	"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
@@ -157,8 +157,9 @@
 /*
  * Try to keep these at 2^N-1
  */
-#define MPT_FC_CAN_QUEUE	127
+#define MPT_FC_CAN_QUEUE	1024
 #define MPT_SCSI_CAN_QUEUE	127
+#define MPT_SAS_CAN_QUEUE	127
 
 /*
  * Set the MAX_SGE value based on user input.
@@ -879,23 +880,9 @@ typedef enum {
 
 typedef struct _MPT_SCSI_HOST {
 	MPT_ADAPTER		 *ioc;
-	int			  port;
-	u32			  pad0;
-	MPT_LOCAL_REPLY		 *pLocal;	/* used for internal commands */
-	struct timer_list	  timer;
-	/* Pool of memory for holding SCpnts before doing
-	 * OS callbacks. freeQ is the free pool.
-	 */
-	u8			  negoNvram;	/* DV disabled, nego NVRAM */
-	u8			  pad1;
-	u8			  rsvd[2];
-	MPT_FRAME_HDR		 *cmdPtr;	/* Ptr to nonOS request */
-	struct scsi_cmnd	 *abortSCpnt;
-	MPT_LOCAL_REPLY		  localReply;	/* internal cmd reply struct */
 	ushort			  sel_timeout[MPT_MAX_FC_DEVICES];
 	char			  *info_kbuf;
 	long			  last_queue_full;
-	u16			  tm_iocstatus;
 	u16			  spi_pending;
 	struct list_head	  target_reset_list;
 } MPT_SCSI_HOST;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index e61df133a59e..ebf6ae024da4 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1288,25 +1288,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
 		 ioc->name, ioc->ScsiLookup));
 
-	/* Clear the TM flags
-	 */
-	hd->abortSCpnt = NULL;
-
-	/* Clear the pointer used to store
-	 * single-threaded commands, i.e., those
-	 * issued during a bus scan, dv and
-	 * configuration pages.
-	 */
-	hd->cmdPtr = NULL;
-
-	/* Initialize this SCSI Hosts' timers
-	 * To use, set the timer expires field
-	 * and add_timer
-	 */
-	init_timer(&hd->timer);
-	hd->timer.data = (unsigned long) hd;
-	hd->timer.function = mptscsih_timer_expired;
-
 	hd->last_queue_full = 0;
 
 	sh->transportt = mptfc_transport_template;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 55ff25244af4..83873e3d0ce7 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -72,6 +72,7 @@
  */
 #define MPTSAS_RAID_CHANNEL	1
 
+#define SAS_CONFIG_PAGE_TIMEOUT		30
 MODULE_AUTHOR(MODULEAUTHOR);
 MODULE_DESCRIPTION(my_NAME);
 MODULE_LICENSE("GPL");
@@ -324,7 +325,6 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
 {
 	struct fw_event_work *fw_event, *next;
 	struct mptsas_target_reset_event *target_reset_list, *n;
-	u8	flush_q;
 	MPT_SCSI_HOST	*hd = shost_priv(ioc->sh);
 
 	/* flush the target_reset_list */
@@ -344,15 +344,10 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
 	    !ioc->fw_event_q || in_interrupt())
 		return;
 
-	flush_q = 0;
 	list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
 		if (cancel_delayed_work(&fw_event->work))
 			mptsas_free_fw_event(ioc, fw_event);
-		else
-			flush_q = 1;
 	}
-	if (flush_q)
-		flush_workqueue(ioc->fw_event_q);
 }
 
 
@@ -661,7 +656,7 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
 	cfg.pageAddr = starget->id;
 	cfg.cfghdr.hdr = &hdr;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	if (mpt_config(ioc, &cfg) != 0)
 		goto out;
@@ -851,7 +846,13 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
 		port_details->num_phys--;
 		port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
 		memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
-		sas_port_delete_phy(port_details->port, phy_info->phy);
+		if (phy_info->phy) {
+			devtprintk(ioc, dev_printk(KERN_DEBUG,
+				&phy_info->phy->dev, MYIOC_s_FMT
+				"delete phy %d, phy-obj (0x%p)\n", ioc->name,
+				phy_info->phy_id, phy_info->phy));
+			sas_port_delete_phy(port_details->port, phy_info->phy);
+		}
 		phy_info->port_details = NULL;
 	}
 
@@ -1272,7 +1273,6 @@ mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 		}
 		mptsas_cleanup_fw_event_q(ioc);
 		mptsas_queue_rescan(ioc);
-		mptsas_fw_event_on(ioc);
 		break;
 	default:
 		break;
@@ -1318,7 +1318,7 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
 	cfg.pageAddr = form + form_specific;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	error = mpt_config(ioc, &cfg);
 	if (error)
@@ -1592,6 +1592,7 @@ mptsas_firmware_event_work(struct work_struct *work)
 		mptsas_scan_sas_topology(ioc);
 		ioc->in_rescan = 0;
 		mptsas_free_fw_event(ioc, fw_event);
+		mptsas_fw_event_on(ioc);
 		return;
 	}
 
@@ -1891,7 +1892,7 @@ static struct scsi_host_template mptsas_driver_template = {
 	.eh_bus_reset_handler		= mptscsih_bus_reset,
 	.eh_host_reset_handler		= mptscsih_host_reset,
 	.bios_param			= mptscsih_bios_param,
-	.can_queue			= MPT_FC_CAN_QUEUE,
+	.can_queue			= MPT_SAS_CAN_QUEUE,
 	.this_id			= -1,
 	.sg_tablesize			= MPT_SCSI_SG_DEPTH,
 	.max_sectors			= 8192,
@@ -1926,7 +1927,7 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
 	cfg.pageAddr = phy->identify.phy_identifier;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	error = mpt_config(ioc, &cfg);
 	if (error)
@@ -2278,7 +2279,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
 	cfg.pageAddr = 0;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	error = mpt_config(ioc, &cfg);
 	if (error)
@@ -2349,7 +2350,7 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
 
 	cfg.cfghdr.ehdr = &hdr;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 	cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
 	cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
 	cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION;
@@ -2411,7 +2412,7 @@ mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
 
 	cfg.cfghdr.ehdr = &hdr;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	/* Get Phy Pg 0 for each Phy. */
 	cfg.physAddr = -1;
@@ -2479,7 +2480,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
 	cfg.physAddr = -1;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	memset(device_info, 0, sizeof(struct mptsas_devinfo));
 	error = mpt_config(ioc, &cfg);
@@ -2554,7 +2555,7 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
 	cfg.pageAddr = form + form_specific;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	memset(port_info, 0, sizeof(struct mptsas_portinfo));
 	error = mpt_config(ioc, &cfg);
@@ -2635,7 +2636,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
 	cfg.pageAddr = form + form_specific;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	error = mpt_config(ioc, &cfg);
 	if (error)
@@ -3307,6 +3308,7 @@ mptsas_send_expander_event(struct fw_event_work *fw_event)
 	expander_data = (MpiEventDataSasExpanderStatusChange_t *)
 	    fw_event->event_data;
 	memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
+	sas_address = le64_to_cpu(sas_address);
 	port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
 
 	if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
@@ -4760,10 +4762,9 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* set 16 byte cdb's */
 	sh->max_cmd_len = 16;
-
-	sh->max_id = ioc->pfacts[0].PortSCSIID;
+	sh->can_queue = min_t(int, ioc->req_depth - 10, sh->can_queue);
+	sh->max_id = -1;
 	sh->max_lun = max_lun;
-
 	sh->transportt = mptsas_transport_template;
 
 	/* Required entry.
@@ -4821,25 +4822,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
 		 ioc->name, ioc->ScsiLookup));
 
-	/* Clear the TM flags
-	 */
-	hd->abortSCpnt = NULL;
-
-	/* Clear the pointer used to store
-	 * single-threaded commands, i.e., those
-	 * issued during a bus scan, dv and
-	 * configuration pages.
-	 */
-	hd->cmdPtr = NULL;
-
-	/* Initialize this SCSI Hosts' timers
-	 * To use, set the timer expires field
-	 * and add_timer
-	 */
-	init_timer(&hd->timer);
-	hd->timer.data = (unsigned long) hd;
-	hd->timer.function = mptscsih_timer_expired;
-
 	ioc->sas_data.ptClear = mpt_pt_clear;
 
 	hd->last_queue_full = 0;
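
Editor's note: the expander-event fix above reads the 64-bit SAS address with memcpy() (the event payload may be unaligned) and then converts it from little-endian wire order, which is what repairs the big-endian "Expander not to tear off" bug from the shortlog. A user-space sketch of the same pattern, with le64toh() standing in for the kernel's le64_to_cpu():

	#include <endian.h>
	#include <stdint.h>
	#include <string.h>

	/* Fetch a little-endian u64 from a possibly unaligned buffer. */
	static uint64_t read_sas_address(const void *payload)
	{
		uint64_t addr;

		memcpy(&addr, payload, sizeof(addr));	/* no unaligned load */
		return le64toh(addr);			/* no-op on LE hosts */
	}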
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 8440f78f6969..c29578614504 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -628,6 +628,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 		return 1;
 	}
 
+	if (ioc->bus_type == SAS) {
+		VirtDevice *vdevice = sc->device->hostdata;
+
+		if (!vdevice || !vdevice->vtarget ||
+		    vdevice->vtarget->deleted) {
+			sc->result = DID_NO_CONNECT << 16;
+			goto out;
+		}
+	}
+
 	sc->host_scribble = NULL;
 	sc->result = DID_OK << 16;		/* Set default reply as OK */
 	pScsiReq = (SCSIIORequest_t *) mf;
@@ -689,6 +699,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 
 	switch(status) {
 	case MPI_IOCSTATUS_BUSY:			/* 0x0002 */
+	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:	/* 0x0006 */
 		/* CHECKME!
 		 * Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry)
 		 * But not: DID_BUS_BUSY lest one risk
@@ -872,7 +883,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 	case MPI_IOCSTATUS_INVALID_SGL:			/* 0x0003 */
 	case MPI_IOCSTATUS_INTERNAL_ERROR:		/* 0x0004 */
 	case MPI_IOCSTATUS_RESERVED:			/* 0x0005 */
-	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:	/* 0x0006 */
 	case MPI_IOCSTATUS_INVALID_FIELD:		/* 0x0007 */
 	case MPI_IOCSTATUS_INVALID_STATE:		/* 0x0008 */
 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:		/* 0x0046 */
@@ -892,7 +902,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 #endif
 
 	} /* end of address reply case */
-
+out:
 	/* Unmap the DMA buffers, if any. */
 	scsi_dma_unmap(sc);
 
@@ -1729,9 +1739,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 	 */
 	mf = MPT_INDEX_2_MFPTR(ioc, scpnt_idx);
 	ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext;
-
-	hd->abortSCpnt = SCpnt;
-
 	retval = mptscsih_IssueTaskMgmt(hd,
 			MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
 			vdevice->vtarget->channel,
@@ -2293,7 +2300,10 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 		else
 			max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
 	} else
-		max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
+		max_depth = ioc->sh->can_queue;
+
+	if (!sdev->tagged_supported)
+		max_depth = 1;
 
 	if (qdepth > max_depth)
 		qdepth = max_depth;
@@ -2627,50 +2637,6 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
 	return 1;
 }
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/* mptscsih_timer_expired - Call back for timer process.
- * Used only for dv functionality.
- * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
- *
- */
-void
-mptscsih_timer_expired(unsigned long data)
-{
-	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data;
-	MPT_ADAPTER *ioc = hd->ioc;
-
-	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired! Cmd %p\n", ioc->name, hd->cmdPtr));
-
-	if (hd->cmdPtr) {
-		MPIHeader_t *cmd = (MPIHeader_t *)hd->cmdPtr;
-
-		if (cmd->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
-			/* Desire to issue a task management request here.
-			 * TM requests MUST be single threaded.
-			 * If old eh code and no TM current, issue request.
-			 * If new eh code, do nothing. Wait for OS cmd timeout
-			 *	for bus reset.
-			 */
-		} else {
-			/* Perform a FW reload */
-			if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) {
-				printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
-			}
-		}
-	} else {
-		/* This should NEVER happen */
-		printk(MYIOC_s_WARN_FMT "Null cmdPtr!!!!\n", ioc->name);
-	}
-
-	/* No more processing.
-	 * TM call will generate an interrupt for SCSI TM Management.
-	 * The FW will reply to all outstanding commands, callback will finish cleanup.
-	 * Hard reset clean-up will free all resources.
-	 */
-	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired Complete!\n", ioc->name));
-
-	return;
-}
 
 /**
  *	mptscsih_get_completion_code -
@@ -3265,6 +3231,5 @@ EXPORT_SYMBOL(mptscsih_scandv_complete);
 EXPORT_SYMBOL(mptscsih_event_process);
 EXPORT_SYMBOL(mptscsih_ioc_reset);
 EXPORT_SYMBOL(mptscsih_change_queue_depth);
-EXPORT_SYMBOL(mptscsih_timer_expired);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
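
Editor's note: mptscsih_change_queue_depth() now caps the per-device depth at the host's can_queue (itself clamped to req_depth - 10 in mptsas_probe() above) and forces untagged devices to a depth of 1. The new part of the clamp reduces to this standalone sketch (names are illustrative):

	/* Depth actually granted for one device, per the logic above. */
	static int clamp_queue_depth(int qdepth, int can_queue, int tagged_supported)
	{
		int max_depth = can_queue;

		if (!tagged_supported)
			max_depth = 1;
		return qdepth > max_depth ? max_depth : qdepth;
	}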
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index eb3f677528ac..e0b33e04a33b 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -129,7 +129,6 @@ extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRA
 extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
 extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
-extern void mptscsih_timer_expired(unsigned long data);
 extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern struct device_attribute *mptscsih_host_attrs[];
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index c5b808fd55ba..69f4257419b5 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1472,28 +1472,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
 		 ioc->name, ioc->ScsiLookup));
 
-	/* Clear the TM flags
-	 */
-	hd->abortSCpnt = NULL;
-
-	/* Clear the pointer used to store
-	 * single-threaded commands, i.e., those
-	 * issued during a bus scan, dv and
-	 * configuration pages.
-	 */
-	hd->cmdPtr = NULL;
-
-	/* Initialize this SCSI Hosts' timers
-	 * To use, set the timer expires field
-	 * and add_timer
-	 */
-	init_timer(&hd->timer);
-	hd->timer.data = (unsigned long) hd;
-	hd->timer.function = mptscsih_timer_expired;
-
 	ioc->spi_data.Saf_Te = mpt_saf_te;
-
-	hd->negoNvram = MPT_SCSICFG_USE_NVRAM;
 	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		"saf_te %x\n",
 		ioc->name,
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 348443bdb23b..7b039306037f 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -33,24 +33,44 @@ static DEFINE_MUTEX(container_list_lock);
 static struct class enclosure_class;
 
 /**
- * enclosure_find - find an enclosure given a device
- * @dev: the device to find for
+ * enclosure_find - find an enclosure given a parent device
+ * @dev: the parent to match against
+ * @start: Optional enclosure device to start from (NULL if none)
  *
- * Looks through the list of registered enclosures to see
- * if it can find a match for a device.  Returns NULL if no
- * enclosure is found. Obtains a reference to the enclosure class
- * device which must be released with device_put().
+ * Looks through the list of registered enclosures to find all those
+ * with @dev as a parent.  Returns NULL if no enclosure is
+ * found. @start can be used as a starting point to obtain multiple
+ * enclosures per parent (should begin with NULL and then be set to
+ * each returned enclosure device). Obtains a reference to the
+ * enclosure class device which must be released with device_put().
+ * If @start is not NULL, a reference must be taken on it which is
+ * released before returning (this allows a loop through all
+ * enclosures to exit with only the reference on the enclosure of
+ * interest held).  Note that the @dev may correspond to the actual
+ * device housing the enclosure, in which case no iteration via @start
+ * is required.
  */
-struct enclosure_device *enclosure_find(struct device *dev)
+struct enclosure_device *enclosure_find(struct device *dev,
+					struct enclosure_device *start)
 {
 	struct enclosure_device *edev;
 
 	mutex_lock(&container_list_lock);
-	list_for_each_entry(edev, &container_list, node) {
-		if (edev->edev.parent == dev) {
-			get_device(&edev->edev);
-			mutex_unlock(&container_list_lock);
-			return edev;
+	edev = list_prepare_entry(start, &container_list, node);
+	if (start)
+		put_device(&start->edev);
+
+	list_for_each_entry_continue(edev, &container_list, node) {
+		struct device *parent = edev->edev.parent;
+		/* parent might not be immediate, so iterate up to
+		 * the root of the tree if necessary */
+		while (parent) {
+			if (parent == dev) {
+				get_device(&edev->edev);
+				mutex_unlock(&container_list_lock);
+				return edev;
+			}
+			parent = parent->parent;
 		}
 	}
 	mutex_unlock(&container_list_lock);
@@ -295,6 +315,9 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
 
 	cdev = &edev->component[component];
 
+	if (cdev->dev == dev)
+		return -EEXIST;
+
 	if (cdev->dev)
 		enclosure_remove_links(cdev);
 
@@ -312,19 +335,25 @@ EXPORT_SYMBOL_GPL(enclosure_add_device);
  * Returns zero on success or an error.
  *
  */
-int enclosure_remove_device(struct enclosure_device *edev, int component)
+int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
 {
 	struct enclosure_component *cdev;
+	int i;
 
-	if (!edev || component >= edev->components)
+	if (!edev || !dev)
 		return -EINVAL;
 
-	cdev = &edev->component[component];
-
-	device_del(&cdev->cdev);
-	put_device(cdev->dev);
-	cdev->dev = NULL;
-	return device_add(&cdev->cdev);
+	for (i = 0; i < edev->components; i++) {
+		cdev = &edev->component[i];
+		if (cdev->dev == dev) {
+			enclosure_remove_links(cdev);
+			device_del(&cdev->cdev);
+			put_device(dev);
+			cdev->dev = NULL;
+			return device_add(&cdev->cdev);
+		}
+	}
+	return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(enclosure_remove_device);
 
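
Editor's note: with the @start cursor, enclosure_find() becomes an iterator: pass NULL first, then feed each returned enclosure back in; per the kerneldoc above, the function drops the reference on the cursor it is given and returns the next match with a fresh reference. A hedged usage sketch (count_enclosures_for is a hypothetical caller):

	#include <linux/enclosure.h>

	/* Count enclosures whose parent chain contains dev.  On loop
	 * exit edev is NULL, so no reference is left held. */
	static int count_enclosures_for(struct device *dev)
	{
		struct enclosure_device *edev = NULL;
		int count = 0;

		while ((edev = enclosure_find(dev, edev)) != NULL)
			count++;
		return count;
	}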
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 2ccbd185a5fb..1be6bf7e8ce6 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -42,6 +42,12 @@ static char *init_device;
 module_param_named(device, init_device, charp, 0400);
 MODULE_PARM_DESC(device, "specify initial device");
 
+static struct kmem_cache *zfcp_cache_hw_align(const char *name,
+					      unsigned long size)
+{
+	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
+}
+
 static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
 {
 	int idx;
@@ -78,7 +84,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	read_lock_irq(&zfcp_data.config_lock);
 	adapter = zfcp_get_adapter_by_busid(busid);
 	if (adapter)
@@ -93,31 +99,23 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
 	unit = zfcp_unit_enqueue(port, lun);
 	if (IS_ERR(unit))
 		goto out_unit;
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
 	ccw_device_set_online(adapter->ccw_device);
 
 	zfcp_erp_wait(adapter);
 	flush_work(&unit->scsi_work);
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	zfcp_unit_put(unit);
 out_unit:
 	zfcp_port_put(port);
 out_port:
 	zfcp_adapter_put(adapter);
 out_adapter:
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
 	return;
 }
 
-static struct kmem_cache *zfcp_cache_create(int size, char *name)
-{
-	int align = 1;
-	while ((size - align) > 0)
-		align <<= 1;
-	return kmem_cache_create(name , size, align, 0, NULL);
-}
-
 static void __init zfcp_init_device_setup(char *devstr)
 {
 	char *token;
@@ -158,24 +156,27 @@ static int __init zfcp_module_init(void)
 {
 	int retval = -ENOMEM;
 
-	zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create(
-			sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf");
-	if (!zfcp_data.fsf_req_qtcb_cache)
+	zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
+					sizeof(struct ct_iu_gpn_ft_req));
+	if (!zfcp_data.gpn_ft_cache)
 		goto out;
 
-	zfcp_data.sr_buffer_cache = zfcp_cache_create(
-			sizeof(struct fsf_status_read_buffer), "zfcp_sr");
+	zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
+					sizeof(struct fsf_qtcb));
+	if (!zfcp_data.qtcb_cache)
+		goto out_qtcb_cache;
+
+	zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
+				sizeof(struct fsf_status_read_buffer));
 	if (!zfcp_data.sr_buffer_cache)
 		goto out_sr_cache;
 
-	zfcp_data.gid_pn_cache = zfcp_cache_create(
-			sizeof(struct zfcp_gid_pn_data), "zfcp_gid");
+	zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
+					sizeof(struct zfcp_gid_pn_data));
 	if (!zfcp_data.gid_pn_cache)
 		goto out_gid_cache;
 
-	zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
-
-	sema_init(&zfcp_data.config_sema, 1);
+	mutex_init(&zfcp_data.config_mutex);
 	rwlock_init(&zfcp_data.config_lock);
 
 	zfcp_data.scsi_transport_template =
@@ -209,7 +210,9 @@ out_transport:
 out_gid_cache:
 	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
 out_sr_cache:
-	kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
+	kmem_cache_destroy(zfcp_data.qtcb_cache);
+out_qtcb_cache:
+	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
 out:
 	return retval;
 }
@@ -263,7 +266,7 @@ static void zfcp_sysfs_unit_release(struct device *dev)
  * @port: pointer to port where unit is added
  * @fcp_lun: FCP LUN of unit to be enqueued
  * Returns: pointer to enqueued unit on success, ERR_PTR on error
- * Locks: config_sema must be held to serialize changes to the unit list
+ * Locks: config_mutex must be held to serialize changes to the unit list
  *
  * Sets up some unit internal structures and creates sysfs entry.
  */
@@ -271,6 +274,13 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 {
 	struct zfcp_unit *unit;
 
+	read_lock_irq(&zfcp_data.config_lock);
+	if (zfcp_get_unit_by_lun(port, fcp_lun)) {
+		read_unlock_irq(&zfcp_data.config_lock);
+		return ERR_PTR(-EINVAL);
+	}
+	read_unlock_irq(&zfcp_data.config_lock);
+
 	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
 	if (!unit)
 		return ERR_PTR(-ENOMEM);
@@ -282,8 +292,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 	unit->port = port;
 	unit->fcp_lun = fcp_lun;
 
-	dev_set_name(&unit->sysfs_device, "0x%016llx",
-		     (unsigned long long) fcp_lun);
+	if (dev_set_name(&unit->sysfs_device, "0x%016llx",
+			 (unsigned long long) fcp_lun)) {
+		kfree(unit);
+		return ERR_PTR(-ENOMEM);
+	}
 	unit->sysfs_device.parent = &port->sysfs_device;
 	unit->sysfs_device.release = zfcp_sysfs_unit_release;
 	dev_set_drvdata(&unit->sysfs_device, unit);
@@ -299,20 +312,15 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
 	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
 
-	read_lock_irq(&zfcp_data.config_lock);
-	if (zfcp_get_unit_by_lun(port, fcp_lun)) {
-		read_unlock_irq(&zfcp_data.config_lock);
-		goto err_out_free;
+	if (device_register(&unit->sysfs_device)) {
+		put_device(&unit->sysfs_device);
+		return ERR_PTR(-EINVAL);
 	}
-	read_unlock_irq(&zfcp_data.config_lock);
-
-	if (device_register(&unit->sysfs_device))
-		goto err_out_free;
 
 	if (sysfs_create_group(&unit->sysfs_device.kobj,
 			       &zfcp_sysfs_unit_attrs)) {
 		device_unregister(&unit->sysfs_device);
-		return ERR_PTR(-EIO);
+		return ERR_PTR(-EINVAL);
 	}
 
 	zfcp_unit_get(unit);
@@ -327,10 +335,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
 	zfcp_port_get(port);
 
 	return unit;
-
-err_out_free:
-	kfree(unit);
-	return ERR_PTR(-EINVAL);
 }
 
 /**
@@ -353,37 +357,47 @@ void zfcp_unit_dequeue(struct zfcp_unit *unit)
 
 static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 {
-	/* must only be called with zfcp_data.config_sema taken */
-	adapter->pool.fsf_req_erp =
-		mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
-	if (!adapter->pool.fsf_req_erp)
+	/* must only be called with zfcp_data.config_mutex taken */
+	adapter->pool.erp_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.erp_req)
+		return -ENOMEM;
+
+	adapter->pool.gid_pn_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.gid_pn_req)
 		return -ENOMEM;
 
-	adapter->pool.fsf_req_scsi =
-		mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
-	if (!adapter->pool.fsf_req_scsi)
+	adapter->pool.scsi_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.scsi_req)
 		return -ENOMEM;
 
-	adapter->pool.fsf_req_abort =
-		mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
-	if (!adapter->pool.fsf_req_abort)
+	adapter->pool.scsi_abort =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.scsi_abort)
 		return -ENOMEM;
 
-	adapter->pool.fsf_req_status_read =
+	adapter->pool.status_read_req =
 		mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
 					    sizeof(struct zfcp_fsf_req));
-	if (!adapter->pool.fsf_req_status_read)
+	if (!adapter->pool.status_read_req)
 		return -ENOMEM;
 
-	adapter->pool.data_status_read =
+	adapter->pool.qtcb_pool =
+		mempool_create_slab_pool(4, zfcp_data.qtcb_cache);
+	if (!adapter->pool.qtcb_pool)
+		return -ENOMEM;
+
+	adapter->pool.status_read_data =
 		mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
 					 zfcp_data.sr_buffer_cache);
-	if (!adapter->pool.data_status_read)
+	if (!adapter->pool.status_read_data)
 		return -ENOMEM;
 
-	adapter->pool.data_gid_pn =
+	adapter->pool.gid_pn_data =
 		mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
-	if (!adapter->pool.data_gid_pn)
+	if (!adapter->pool.gid_pn_data)
 		return -ENOMEM;
 
 	return 0;
@@ -391,19 +405,21 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 
 static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 {
-	/* zfcp_data.config_sema must be held */
-	if (adapter->pool.fsf_req_erp)
-		mempool_destroy(adapter->pool.fsf_req_erp);
-	if (adapter->pool.fsf_req_scsi)
-		mempool_destroy(adapter->pool.fsf_req_scsi);
-	if (adapter->pool.fsf_req_abort)
-		mempool_destroy(adapter->pool.fsf_req_abort);
-	if (adapter->pool.fsf_req_status_read)
-		mempool_destroy(adapter->pool.fsf_req_status_read);
-	if (adapter->pool.data_status_read)
-		mempool_destroy(adapter->pool.data_status_read);
-	if (adapter->pool.data_gid_pn)
-		mempool_destroy(adapter->pool.data_gid_pn);
+	/* zfcp_data.config_mutex must be held */
+	if (adapter->pool.erp_req)
+		mempool_destroy(adapter->pool.erp_req);
+	if (adapter->pool.scsi_req)
+		mempool_destroy(adapter->pool.scsi_req);
+	if (adapter->pool.scsi_abort)
+		mempool_destroy(adapter->pool.scsi_abort);
+	if (adapter->pool.qtcb_pool)
+		mempool_destroy(adapter->pool.qtcb_pool);
+	if (adapter->pool.status_read_req)
+		mempool_destroy(adapter->pool.status_read_req);
+	if (adapter->pool.status_read_data)
+		mempool_destroy(adapter->pool.status_read_data);
+	if (adapter->pool.gid_pn_data)
+		mempool_destroy(adapter->pool.gid_pn_data);
 }
 
 /**
@@ -418,7 +434,7 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 {
 	while (atomic_read(&adapter->stat_miss) > 0)
-		if (zfcp_fsf_status_read(adapter)) {
+		if (zfcp_fsf_status_read(adapter->qdio)) {
 			if (atomic_read(&adapter->stat_miss) >= 16) {
 				zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
 							NULL);
@@ -446,6 +462,27 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
 		   adapter->fsf_lic_version);
 }
 
+static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
+{
+	char name[TASK_COMM_LEN];
+
+	snprintf(name, sizeof(name), "zfcp_q_%s",
+		 dev_name(&adapter->ccw_device->dev));
+	adapter->work_queue = create_singlethread_workqueue(name);
+
+	if (adapter->work_queue)
+		return 0;
+	return -ENOMEM;
+}
+
+static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
+{
+	if (adapter->work_queue)
+		destroy_workqueue(adapter->work_queue);
+	adapter->work_queue = NULL;
+
+}
+
 /**
  * zfcp_adapter_enqueue - enqueue a new adapter to the list
  * @ccw_device: pointer to the struct cc_device
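
The new zfcp_setup_adapter_work_queue() gives each adapter a dedicated single-threaded workqueue named after its ccw device, used for deferred work such as port scanning. The general lifecycle of such a queue looks like the following; everything here is an illustrative sketch, not zfcp code:

	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_wq;	/* illustrative only */
	static struct work_struct demo_work;

	static void demo_work_fn(struct work_struct *work)
	{
		/* runs in process context on the queue's kernel thread */
	}

	static int demo_wq_setup(void)
	{
		demo_wq = create_singlethread_workqueue("demo_q");
		if (!demo_wq)
			return -ENOMEM;
		INIT_WORK(&demo_work, demo_work_fn);
		queue_work(demo_wq, &demo_work);
		return 0;
	}

	static void demo_wq_teardown(void)
	{
		flush_workqueue(demo_wq);	/* wait for pending work */
		destroy_workqueue(demo_wq);
	}
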
@@ -455,7 +492,7 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
  * Enqueues an adapter at the end of the adapter list in the driver data.
  * All adapter internal structures are set up.
  * Proc-fs entries are also created.
- * locks: config_sema must be held to serialise changes to the adapter list
+ * locks: config_mutex must be held to serialize changes to the adapter list
  */
 int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 {
@@ -463,37 +500,37 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 
 	/*
 	 * Note: It is safe to release the list_lock, as any list changes
-	 * are protected by the config_sema, which must be held to get here
+	 * are protected by the config_mutex, which must be held to get here
 	 */
 
 	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
 	if (!adapter)
 		return -ENOMEM;
 
-	adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
-	if (!adapter->gs) {
-		kfree(adapter);
-		return -ENOMEM;
-	}
-
 	ccw_device->handler = NULL;
 	adapter->ccw_device = ccw_device;
 	atomic_set(&adapter->refcount, 0);
 
-	if (zfcp_qdio_allocate(adapter))
-		goto qdio_allocate_failed;
+	if (zfcp_qdio_setup(adapter))
+		goto qdio_failed;
 
 	if (zfcp_allocate_low_mem_buffers(adapter))
-		goto failed_low_mem_buffers;
+		goto low_mem_buffers_failed;
 
 	if (zfcp_reqlist_alloc(adapter))
-		goto failed_low_mem_buffers;
+		goto low_mem_buffers_failed;
 
-	if (zfcp_adapter_debug_register(adapter))
+	if (zfcp_dbf_adapter_register(adapter))
 		goto debug_register_failed;
 
+	if (zfcp_setup_adapter_work_queue(adapter))
+		goto work_queue_failed;
+
+	if (zfcp_fc_gs_setup(adapter))
+		goto generic_services_failed;
+
 	init_waitqueue_head(&adapter->remove_wq);
-	init_waitqueue_head(&adapter->erp_thread_wqh);
+	init_waitqueue_head(&adapter->erp_ready_wq);
 	init_waitqueue_head(&adapter->erp_done_wqh);
 
 	INIT_LIST_HEAD(&adapter->port_list_head);
@@ -502,20 +539,14 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 
 	spin_lock_init(&adapter->req_list_lock);
 
-	spin_lock_init(&adapter->hba_dbf_lock);
-	spin_lock_init(&adapter->san_dbf_lock);
-	spin_lock_init(&adapter->scsi_dbf_lock);
-	spin_lock_init(&adapter->rec_dbf_lock);
-	spin_lock_init(&adapter->req_q_lock);
-	spin_lock_init(&adapter->qdio_stat_lock);
-
 	rwlock_init(&adapter->erp_lock);
 	rwlock_init(&adapter->abort_lock);
 
-	sema_init(&adapter->erp_ready_sem, 0);
+	if (zfcp_erp_thread_setup(adapter))
+		goto erp_thread_failed;
 
 	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
-	INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);
+	INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later);
 
 	adapter->service_level.seq_print = zfcp_print_sl;
 
@@ -529,20 +560,25 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 		goto sysfs_failed;
 
 	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
-	zfcp_fc_wka_ports_init(adapter);
 
 	if (!zfcp_adapter_scsi_register(adapter))
 		return 0;
 
 sysfs_failed:
-	zfcp_adapter_debug_unregister(adapter);
+	zfcp_erp_thread_kill(adapter);
+erp_thread_failed:
+	zfcp_fc_gs_destroy(adapter);
+generic_services_failed:
+	zfcp_destroy_adapter_work_queue(adapter);
+work_queue_failed:
+	zfcp_dbf_adapter_unregister(adapter->dbf);
 debug_register_failed:
 	dev_set_drvdata(&ccw_device->dev, NULL);
 	kfree(adapter->req_list);
-failed_low_mem_buffers:
+low_mem_buffers_failed:
 	zfcp_free_low_mem_buffers(adapter);
-qdio_allocate_failed:
-	zfcp_qdio_free(adapter);
+qdio_failed:
+	zfcp_qdio_destroy(adapter->qdio);
 	kfree(adapter);
 	return -ENOMEM;
 }
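
The reworked exit ladder in zfcp_adapter_enqueue() is the usual kernel goto-unwind idiom: each failing setup step jumps to a label that tears down only the steps that already succeeded, in reverse order. Reduced to its skeleton, with all helpers hypothetical:

	static int demo_setup(void)
	{
		if (alloc_a())		/* hypothetical setup steps */
			goto a_failed;
		if (alloc_b())
			goto b_failed;
		if (alloc_c())
			goto c_failed;
		return 0;

	c_failed:
		free_b();		/* undo in reverse order of setup */
	b_failed:
		free_a();
	a_failed:
		return -ENOMEM;
	}
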
@@ -559,6 +595,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
 
 	cancel_work_sync(&adapter->scan_work);
 	cancel_work_sync(&adapter->stat_work);
+	zfcp_fc_wka_ports_force_offline(adapter->gs);
 	zfcp_adapter_scsi_unregister(adapter);
 	sysfs_remove_group(&adapter->ccw_device->dev.kobj,
 			   &zfcp_sysfs_adapter_attrs);
@@ -570,13 +607,15 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
 	if (!retval)
 		return;
 
-	zfcp_adapter_debug_unregister(adapter);
-	zfcp_qdio_free(adapter);
+	zfcp_fc_gs_destroy(adapter);
+	zfcp_erp_thread_kill(adapter);
+	zfcp_destroy_adapter_work_queue(adapter);
+	zfcp_dbf_adapter_unregister(adapter->dbf);
 	zfcp_free_low_mem_buffers(adapter);
+	zfcp_qdio_destroy(adapter->qdio);
 	kfree(adapter->req_list);
 	kfree(adapter->fc_stats);
 	kfree(adapter->stats_reset_data);
-	kfree(adapter->gs);
 	kfree(adapter);
 }
 
@@ -592,7 +631,7 @@ static void zfcp_sysfs_port_release(struct device *dev)
  * @status: initial status for the port
  * @d_id: destination id of the remote port to be enqueued
  * Returns: pointer to enqueued port on success, ERR_PTR on error
- * Locks: config_sema must be held to serialize changes to the port list
+ * Locks: config_mutex must be held to serialize changes to the port list
  *
  * All port internal structures are set up and the sysfs entry is generated.
  * d_id is used to enqueue ports with a well known address like the Directory
@@ -602,7 +641,13 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 				     u32 status, u32 d_id)
 {
 	struct zfcp_port *port;
-	int retval;
+
+	read_lock_irq(&zfcp_data.config_lock);
+	if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
+		read_unlock_irq(&zfcp_data.config_lock);
+		return ERR_PTR(-EINVAL);
+	}
+	read_unlock_irq(&zfcp_data.config_lock);
 
 	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
 	if (!port)
@@ -610,7 +655,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 
 	init_waitqueue_head(&port->remove_wq);
 	INIT_LIST_HEAD(&port->unit_list_head);
-	INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
+	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
 	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
 	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
 
@@ -623,29 +668,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 	atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
 	atomic_set(&port->refcount, 0);
 
-	dev_set_name(&port->sysfs_device, "0x%016llx",
-		     (unsigned long long)wwpn);
+	if (dev_set_name(&port->sysfs_device, "0x%016llx",
+			 (unsigned long long)wwpn)) {
+		kfree(port);
+		return ERR_PTR(-ENOMEM);
+	}
 	port->sysfs_device.parent = &adapter->ccw_device->dev;
-
 	port->sysfs_device.release = zfcp_sysfs_port_release;
 	dev_set_drvdata(&port->sysfs_device, port);
 
-	read_lock_irq(&zfcp_data.config_lock);
-	if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
-		read_unlock_irq(&zfcp_data.config_lock);
-		goto err_out_free;
+	if (device_register(&port->sysfs_device)) {
+		put_device(&port->sysfs_device);
+		return ERR_PTR(-EINVAL);
 	}
-	read_unlock_irq(&zfcp_data.config_lock);
 
-	if (device_register(&port->sysfs_device))
-		goto err_out_free;
-
-	retval = sysfs_create_group(&port->sysfs_device.kobj,
-				    &zfcp_sysfs_port_attrs);
-
-	if (retval) {
+	if (sysfs_create_group(&port->sysfs_device.kobj,
+			       &zfcp_sysfs_port_attrs)) {
 		device_unregister(&port->sysfs_device);
-		goto err_out;
+		return ERR_PTR(-EINVAL);
 	}
 
 	zfcp_port_get(port);
@@ -659,11 +699,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 
 	zfcp_adapter_get(adapter);
 	return port;
-
-err_out_free:
-	kfree(port);
-err_out:
-	return ERR_PTR(-EINVAL);
 }
 
 /**
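
The zfcp_port_enqueue() rework also encodes a driver-model rule worth spelling out: before device_register() the structure may simply be kfree()d, but once registration has been attempted the embedded kobject owns it, so failure must be unwound with put_device(), and the final free happens in the release callback. A condensed sketch, with the struct and names illustrative:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct demo_obj {			/* illustrative only */
		struct device dev;
	};

	static int demo_add(struct demo_obj *obj)
	{
		if (dev_set_name(&obj->dev, "demo0")) {
			kfree(obj);	/* not registered yet: plain free */
			return -ENOMEM;
		}
		if (device_register(&obj->dev)) {
			/* kobject owns obj now; release() frees it */
			put_device(&obj->dev);
			return -EINVAL;
		}
		return 0;
	}
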
@@ -672,12 +707,11 @@ err_out:
  */
 void zfcp_port_dequeue(struct zfcp_port *port)
 {
-	wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
 	write_lock_irq(&zfcp_data.config_lock);
 	list_del(&port->list);
 	write_unlock_irq(&zfcp_data.config_lock);
-	if (port->rport)
-		port->rport->dd_data = NULL;
+	wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
+	cancel_work_sync(&port->rport_work); /* usually not necessary */
 	zfcp_adapter_put(port->adapter);
 	sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
 	device_unregister(&port->sysfs_device);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index d9da5c42ccbe..0c90f8e71605 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -18,12 +18,15 @@ static int zfcp_ccw_suspend(struct ccw_device *cdev)
 {
 	struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
 
-	down(&zfcp_data.config_sema);
+	if (!adapter)
+		return 0;
+
+	mutex_lock(&zfcp_data.config_mutex);
 
 	zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL);
 	zfcp_erp_wait(adapter);
 
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
 
 	return 0;
 }
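
Throughout these hunks the config_sema binary semaphore becomes config_mutex. A mutex is the preferred sleeping lock for plain mutual exclusion: it has a single owner and much better debug checking than a semaphore used as a lock. The shape of such a conversion, in isolation:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_config_mutex);	/* was: a binary semaphore */

	static void demo_critical_section(void)
	{
		mutex_lock(&demo_config_mutex);		/* was: down(&sema) */
		/* ... modify configuration ... */
		mutex_unlock(&demo_config_mutex);	/* was: up(&sema) */
	}
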
@@ -33,6 +36,9 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
 {
 	struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
 
+	if (!adapter)
+		return 0;
+
 	zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL,
 				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -63,25 +69,14 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
  * zfcp_ccw_probe - probe function of zfcp driver
  * @ccw_device: pointer to belonging ccw device
  *
- * This function gets called by the common i/o layer and sets up the initial
- * data structures for each fcp adapter, which was detected by the system.
- * Also the sysfs files for this adapter will be created by this function.
- * In addition the nameserver port will be added to the ports of the adapter
- * and its sysfs representation will be created too.
+ * This function gets called by the common i/o layer for each FCP
+ * device found on the current system. This is only a stub to make cio
+ * work: To only allocate adapter resources for devices actually used,
+ * the allocation is deferred to the first call to ccw_set_online.
  */
 static int zfcp_ccw_probe(struct ccw_device *ccw_device)
 {
-	int retval = 0;
-
-	down(&zfcp_data.config_sema);
-	if (zfcp_adapter_enqueue(ccw_device)) {
-		dev_err(&ccw_device->dev,
-			"Setting up data structures for the "
-			"FCP adapter failed\n");
-		retval = -EINVAL;
-	}
-	up(&zfcp_data.config_sema);
-	return retval;
+	return 0;
 }
 
@@ -102,8 +97,11 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
 	LIST_HEAD(port_remove_lh);
 
 	ccw_device_set_offline(ccw_device);
-	down(&zfcp_data.config_sema);
+
+	mutex_lock(&zfcp_data.config_mutex);
 	adapter = dev_get_drvdata(&ccw_device->dev);
+	if (!adapter)
+		goto out;
 
 	write_lock_irq(&zfcp_data.config_lock);
 	list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
@@ -129,29 +127,41 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
 	wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
 	zfcp_adapter_dequeue(adapter);
 
-	up(&zfcp_data.config_sema);
+out:
+	mutex_unlock(&zfcp_data.config_mutex);
 }
 
 /**
  * zfcp_ccw_set_online - set_online function of zfcp driver
  * @ccw_device: pointer to belonging ccw device
  *
- * This function gets called by the common i/o layer and sets an adapter
- * into state online. Setting an fcp device online means that it will be
- * registered with the SCSI stack, that the QDIO queues will be set up
- * and that the adapter will be opened (asynchronously).
+ * This function gets called by the common i/o layer and sets an
+ * adapter into state online. The first call will allocate all
+ * adapter resources that will be retained until the device is removed
+ * via zfcp_ccw_remove.
+ *
+ * Setting an fcp device online means that it will be registered with
+ * the SCSI stack, that the QDIO queues will be set up and that the
+ * adapter will be opened.
  */
 static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
-	int retval;
+	int ret = 0;
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	adapter = dev_get_drvdata(&ccw_device->dev);
 
-	retval = zfcp_erp_thread_setup(adapter);
-	if (retval)
-		goto out;
+	if (!adapter) {
+		ret = zfcp_adapter_enqueue(ccw_device);
+		if (ret) {
+			dev_err(&ccw_device->dev,
+				"Setting up data structures for the "
+				"FCP adapter failed\n");
+			goto out;
+		}
+		adapter = dev_get_drvdata(&ccw_device->dev);
+	}
 
 	/* initialize request counter */
 	BUG_ON(!zfcp_reqlist_isempty(adapter));
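
This is the allocate-on-first-online scheme announced in the new probe comment: probe stays a stub, and the drvdata pointer doubles as the "already allocated" flag. Condensed to its skeleton, with all types and helpers hypothetical:

	static int demo_set_online(struct device *dev)
	{
		struct demo_adapter *adapter = dev_get_drvdata(dev);

		if (!adapter) {		/* first online: allocate lazily */
			if (demo_adapter_enqueue(dev))	/* hypothetical */
				return -ENOMEM;
			adapter = dev_get_drvdata(dev);
		}
		return demo_adapter_start(adapter);	/* hypothetical */
	}
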
@@ -162,13 +172,11 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
 				"ccsonl2", NULL);
 	zfcp_erp_wait(adapter);
-	up(&zfcp_data.config_sema);
-	flush_work(&adapter->scan_work);
-	return 0;
-
- out:
-	up(&zfcp_data.config_sema);
-	return retval;
+out:
+	mutex_unlock(&zfcp_data.config_mutex);
+	if (!ret)
+		flush_work(&adapter->scan_work);
+	return ret;
 }
 
 /**
@@ -182,12 +190,15 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	adapter = dev_get_drvdata(&ccw_device->dev);
+	if (!adapter)
+		goto out;
+
 	zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
 	zfcp_erp_wait(adapter);
-	zfcp_erp_thread_kill(adapter);
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
+out:
 	return 0;
 }
 
@@ -240,11 +251,12 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 {
 	struct zfcp_adapter *adapter;
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	adapter = dev_get_drvdata(&cdev->dev);
 	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
 	zfcp_erp_wait(adapter);
-	up(&zfcp_data.config_sema);
+	zfcp_erp_thread_kill(adapter);
+	mutex_unlock(&zfcp_data.config_mutex);
 }
 
 static struct ccw_driver zfcp_ccw_driver = {
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index b99b87ce5a39..215b70749e95 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -11,6 +11,7 @@
 
 #include <linux/ctype.h>
 #include <asm/debug.h>
+#include "zfcp_dbf.h"
 #include "zfcp_ext.h"
 
 static u32 dbfsize = 4;
@@ -37,19 +38,6 @@ static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len,
 	}
 }
 
-/* FIXME: this duplicate this code in s390 debug feature */
-static void zfcp_dbf_timestamp(unsigned long long stck, struct timespec *time)
-{
-	unsigned long long sec;
-
-	stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
-	sec = stck >> 12;
-	do_div(sec, 1000000);
-	time->tv_sec = sec;
-	stck -= (sec * 1000000) << 12;
-	time->tv_nsec = ((stck * 1000) >> 12);
-}
-
 static void zfcp_dbf_tag(char **p, const char *label, const char *tag)
 {
 	int i;
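
The deleted helper duplicated a TOD-clock conversion that s390 already exports; later hunks in this file call stck_to_timespec() from <asm/timex.h> instead. Its use, reduced to an illustrative sketch:

	#include <linux/kernel.h>
	#include <linux/time.h>
	#include <asm/timex.h>	/* s390: provides stck_to_timespec() */

	static void demo_print_tod(unsigned long long stck)
	{
		struct timespec t;

		/* convert a raw STCK value to seconds/nanoseconds */
		stck_to_timespec(stck, &t);
		printk(KERN_INFO "tod %011lu:%06lu\n", t.tv_sec, t.tv_nsec);
	}
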
@@ -106,7 +94,7 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view,
 	char *p = out_buf;
 
 	if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
-		zfcp_dbf_timestamp(entry->id.stck, &t);
+		stck_to_timespec(entry->id.stck, &t);
 		zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu",
 			     t.tv_sec, t.tv_nsec);
 		zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid);
@@ -119,13 +107,10 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view,
 	return p - out_buf;
 }
 
-/**
- * zfcp_hba_dbf_event_fsf_response - trace event for request completion
- * @fsf_req: request that has been completed
- */
-void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
+void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
+				struct zfcp_fsf_req *fsf_req,
+				struct zfcp_dbf *dbf)
 {
-	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fsf_qtcb *qtcb = fsf_req->qtcb;
 	union fsf_prot_status_qual *prot_status_qual =
 		&qtcb->prefix.prot_status_qual;
@@ -134,33 +119,14 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
 	struct zfcp_send_els *send_els;
-	struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
-	struct zfcp_hba_dbf_record_response *response = &rec->u.response;
-	int level;
+	struct zfcp_dbf_hba_record *rec = &dbf->hba_buf;
+	struct zfcp_dbf_hba_record_response *response = &rec->u.response;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 	strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
-
-	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
-	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
-		strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE);
-		level = 1;
-	} else if (qtcb->header.fsf_status != FSF_GOOD) {
-		strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE);
-		level = 1;
-	} else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
-		   (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
-		strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
-		level = 4;
-	} else if (qtcb->header.log_length) {
-		strncpy(rec->tag2, "qtcb", ZFCP_DBF_TAG_SIZE);
-		level = 5;
-	} else {
-		strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
-		level = 6;
-	}
+	strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
 
 	response->fsf_command = fsf_req->fsf_command;
 	response->fsf_reqid = fsf_req->req_id;
@@ -173,9 +139,9 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
 	memcpy(response->fsf_status_qual,
 	       fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
 	response->fsf_req_status = fsf_req->status;
-	response->sbal_first = fsf_req->sbal_first;
-	response->sbal_last = fsf_req->sbal_last;
-	response->sbal_response = fsf_req->sbal_response;
+	response->sbal_first = fsf_req->queue_req.sbal_first;
+	response->sbal_last = fsf_req->queue_req.sbal_last;
+	response->sbal_response = fsf_req->queue_req.sbal_response;
 	response->pool = fsf_req->pool != NULL;
 	response->erp_action = (unsigned long)fsf_req->erp_action;
 
@@ -224,7 +190,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
 		break;
 	}
 
-	debug_event(adapter->hba_dbf, level, rec, sizeof(*rec));
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
 
 	/* have fcp channel microcode fixed to use as little as possible */
 	if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) {
@@ -232,31 +198,25 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
 		char *buf = (char *)qtcb + qtcb->header.log_start;
 		int len = qtcb->header.log_length;
 		for (; len && !buf[len - 1]; len--);
-		zfcp_dbf_hexdump(adapter->hba_dbf, rec, sizeof(*rec), level,
-				 buf, len);
+		zfcp_dbf_hexdump(dbf->hba, rec, sizeof(*rec), level, buf,
+				 len);
 	}
 
-	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
-/**
- * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer
- * @tag: tag indicating which kind of unsolicited status has been received
- * @adapter: adapter that has issued the unsolicited status buffer
- * @status_buffer: buffer containing payload of unsolicited status
- */
-void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
-				  struct fsf_status_read_buffer *status_buffer)
+void _zfcp_dbf_hba_fsf_unsol(const char *tag, int level, struct zfcp_dbf *dbf,
+			     struct fsf_status_read_buffer *status_buffer)
 {
-	struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
+	struct zfcp_dbf_hba_record *rec = &dbf->hba_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 	strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
 	strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
 
-	rec->u.status.failed = atomic_read(&adapter->stat_miss);
+	rec->u.status.failed = atomic_read(&dbf->adapter->stat_miss);
 	if (status_buffer != NULL) {
 		rec->u.status.status_type = status_buffer->status_type;
 		rec->u.status.status_subtype = status_buffer->status_subtype;
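
All of these trace helpers share one s390 debug feature pattern: take a spinlock serializing a static record buffer, rebuild the record, and hand it to debug_event() at a level that controls filtering. A stripped-down sketch of that pattern (record layout illustrative; the debug_info_t is assumed to come from debug_register() during setup):

	#include <linux/spinlock.h>
	#include <linux/string.h>
	#include <linux/types.h>
	#include <asm/debug.h>

	struct demo_rec {			/* illustrative only */
		char tag[4];
		u64 value;
	};

	static debug_info_t *demo_dbf;		/* from debug_register() */
	static struct demo_rec demo_buf;
	static DEFINE_SPINLOCK(demo_dbf_lock);

	static void demo_trace(const char *tag, u64 value, int level)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_dbf_lock, flags);
		memset(&demo_buf, 0, sizeof(demo_buf));
		memcpy(demo_buf.tag, tag, sizeof(demo_buf.tag));
		demo_buf.value = value;
		debug_event(demo_dbf, level, &demo_buf, sizeof(demo_buf));
		spin_unlock_irqrestore(&demo_dbf_lock, flags);
	}
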
@@ -293,63 +253,61 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
 			&status_buffer->payload, rec->u.status.payload_size);
 	}
 
-	debug_event(adapter->hba_dbf, 2, rec, sizeof(*rec));
-	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
 /**
- * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
- * @adapter: adapter affected by this QDIO related event
+ * zfcp_dbf_hba_qdio - trace event for QDIO related failure
+ * @qdio: qdio structure affected by this QDIO related event
  * @qdio_error: as passed by qdio module
  * @sbal_index: first buffer with error condition, as passed by qdio module
  * @sbal_count: number of buffers affected, as passed by qdio module
  */
-void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
-			     unsigned int qdio_error, int sbal_index,
-			     int sbal_count)
+void zfcp_dbf_hba_qdio(struct zfcp_dbf *dbf, unsigned int qdio_error,
+		       int sbal_index, int sbal_count)
 {
-	struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
+	struct zfcp_dbf_hba_record *r = &dbf->hba_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
 	r->u.qdio.qdio_error = qdio_error;
 	r->u.qdio.sbal_index = sbal_index;
 	r->u.qdio.sbal_count = sbal_count;
-	debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+	debug_event(dbf->hba, 0, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
 /**
- * zfcp_hba_dbf_event_berr - trace event for bit error threshold
- * @adapter: adapter affected by this QDIO related event
+ * zfcp_dbf_hba_berr - trace event for bit error threshold
+ * @dbf: dbf structure affected by this QDIO related event
  * @req: fsf request
  */
-void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter,
-			     struct zfcp_fsf_req *req)
+void zfcp_dbf_hba_berr(struct zfcp_dbf *dbf, struct zfcp_fsf_req *req)
 {
-	struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
+	struct zfcp_dbf_hba_record *r = &dbf->hba_buf;
 	struct fsf_status_read_buffer *sr_buf = req->data;
 	struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
 	memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
-	debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+	debug_event(dbf->hba, 0, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
-static void zfcp_hba_dbf_view_response(char **p,
-				       struct zfcp_hba_dbf_record_response *r)
+static void zfcp_dbf_hba_view_response(char **p,
+				       struct zfcp_dbf_hba_record_response *r)
 {
 	struct timespec t;
 
 	zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command);
 	zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
 	zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno);
-	zfcp_dbf_timestamp(r->fsf_issued, &t);
+	stck_to_timespec(r->fsf_issued, &t);
 	zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
 	zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status);
 	zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status);
@@ -403,8 +361,8 @@ static void zfcp_hba_dbf_view_response(char **p,
 	}
 }
 
-static void zfcp_hba_dbf_view_status(char **p,
-				     struct zfcp_hba_dbf_record_status *r)
+static void zfcp_dbf_hba_view_status(char **p,
+				     struct zfcp_dbf_hba_record_status *r)
 {
 	zfcp_dbf_out(p, "failed", "0x%02x", r->failed);
 	zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type);
@@ -416,14 +374,14 @@ static void zfcp_hba_dbf_view_status(char **p,
 		     r->payload_size);
 }
 
-static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
+static void zfcp_dbf_hba_view_qdio(char **p, struct zfcp_dbf_hba_record_qdio *r)
 {
 	zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
 	zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
 	zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
 }
 
-static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r)
+static void zfcp_dbf_hba_view_berr(char **p, struct fsf_bit_error_payload *r)
 {
 	zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count);
 	zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count);
@@ -447,10 +405,10 @@ static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r)
 		     r->current_transmit_b2b_credit);
 }
 
-static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
+static int zfcp_dbf_hba_view_format(debug_info_t *id, struct debug_view *view,
 				    char *out_buf, const char *in_buf)
 {
-	struct zfcp_hba_dbf_record *r = (struct zfcp_hba_dbf_record *)in_buf;
+	struct zfcp_dbf_hba_record *r = (struct zfcp_dbf_hba_record *)in_buf;
 	char *p = out_buf;
 
 	if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
@@ -461,45 +419,42 @@ static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
 	zfcp_dbf_tag(&p, "tag2", r->tag2);
 
 	if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_hba_dbf_view_response(&p, &r->u.response);
+		zfcp_dbf_hba_view_response(&p, &r->u.response);
 	else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_hba_dbf_view_status(&p, &r->u.status);
+		zfcp_dbf_hba_view_status(&p, &r->u.status);
 	else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_hba_dbf_view_qdio(&p, &r->u.qdio);
+		zfcp_dbf_hba_view_qdio(&p, &r->u.qdio);
 	else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_hba_dbf_view_berr(&p, &r->u.berr);
+		zfcp_dbf_hba_view_berr(&p, &r->u.berr);
 
 	if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0)
 		p += sprintf(p, "\n");
 	return p - out_buf;
 }
 
-static struct debug_view zfcp_hba_dbf_view = {
-	"structured",
-	NULL,
-	&zfcp_dbf_view_header,
-	&zfcp_hba_dbf_view_format,
-	NULL,
-	NULL
+static struct debug_view zfcp_dbf_hba_view = {
+	.name = "structured",
+	.header_proc = zfcp_dbf_view_header,
+	.format_proc = zfcp_dbf_hba_view_format,
 };
 
-static const char *zfcp_rec_dbf_tags[] = {
+static const char *zfcp_dbf_rec_tags[] = {
 	[ZFCP_REC_DBF_ID_THREAD] = "thread",
 	[ZFCP_REC_DBF_ID_TARGET] = "target",
 	[ZFCP_REC_DBF_ID_TRIGGER] = "trigger",
 	[ZFCP_REC_DBF_ID_ACTION] = "action",
 };
 
-static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
+static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view,
 				    char *buf, const char *_rec)
 {
-	struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec;
+	struct zfcp_dbf_rec_record *r = (struct zfcp_dbf_rec_record *)_rec;
 	char *p = buf;
 	char hint[ZFCP_DBF_ID_SIZE + 1];
 
 	memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE);
 	hint[ZFCP_DBF_ID_SIZE] = 0;
-	zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]);
+	zfcp_dbf_outs(&p, "tag", zfcp_dbf_rec_tags[r->id]);
 	zfcp_dbf_outs(&p, "hint", hint);
 	switch (r->id) {
 	case ZFCP_REC_DBF_ID_THREAD:
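
The debug_view initializers above switch from positional to C99 designated initializers, which keep working if fields are reordered or added and make the untouched callbacks implicitly NULL. The difference in isolation, with the struct and functions illustrative:

	struct demo_ops {			/* illustrative only */
		const char *name;
		void *private_data;
		int (*header_proc)(void);
		int (*format_proc)(void);
		int (*save_fn)(void);
		int (*restore_fn)(void);
	};

	static int header_fn(void) { return 0; }
	static int format_fn(void) { return 0; }

	/* positional: every earlier slot must be spelled out in order */
	static struct demo_ops legacy = {
		"structured", NULL, header_fn, format_fn, NULL, NULL
	};

	/* designated: name only the fields you set; the rest become NULL */
	static struct demo_ops modern = {
		.name = "structured",
		.header_proc = header_fn,
		.format_proc = format_fn,
	};
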
@@ -537,24 +492,22 @@ static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
 	return p - buf;
 }
 
-static struct debug_view zfcp_rec_dbf_view = {
-	"structured",
-	NULL,
-	&zfcp_dbf_view_header,
-	&zfcp_rec_dbf_view_format,
-	NULL,
-	NULL
+static struct debug_view zfcp_dbf_rec_view = {
+	.name = "structured",
+	.header_proc = zfcp_dbf_view_header,
+	.format_proc = zfcp_dbf_rec_view_format,
 };
 
 /**
- * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
+ * zfcp_dbf_rec_thread - trace event related to recovery thread operation
  * @id2: identifier for event
- * @adapter: adapter
+ * @dbf: reference to dbf structure
  * This function assumes that the caller is holding erp_lock.
  */
-void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter)
+void zfcp_dbf_rec_thread(char *id2, struct zfcp_dbf *dbf)
 {
-	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	struct zfcp_adapter *adapter = dbf->adapter;
+	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
 	unsigned long flags = 0;
 	struct list_head *entry;
 	unsigned ready = 0, running = 0, total;
@@ -565,41 +518,41 @@ void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter)
 			running++;
 	total = adapter->erp_total_count;
 
-	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(r, 0, sizeof(*r));
 	r->id = ZFCP_REC_DBF_ID_THREAD;
 	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
 	r->u.thread.total = total;
 	r->u.thread.ready = ready;
 	r->u.thread.running = running;
-	debug_event(adapter->rec_dbf, 6, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+	debug_event(dbf->rec, 6, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
 /**
- * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
+ * zfcp_dbf_rec_thread - trace event related to recovery thread operation
  * @id2: identifier for event
  * @adapter: adapter
  * This function assumes that the caller does not hold erp_lock.
  */
-void zfcp_rec_dbf_event_thread_lock(char *id2, struct zfcp_adapter *adapter)
+void zfcp_dbf_rec_thread_lock(char *id2, struct zfcp_dbf *dbf)
 {
+	struct zfcp_adapter *adapter = dbf->adapter;
 	unsigned long flags;
 
 	read_lock_irqsave(&adapter->erp_lock, flags);
-	zfcp_rec_dbf_event_thread(id2, adapter);
+	zfcp_dbf_rec_thread(id2, dbf);
 	read_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static void zfcp_rec_dbf_event_target(char *id2, void *ref,
-				      struct zfcp_adapter *adapter,
-				      atomic_t *status, atomic_t *erp_count,
-				      u64 wwpn, u32 d_id, u64 fcp_lun)
+static void zfcp_dbf_rec_target(char *id2, void *ref, struct zfcp_dbf *dbf,
+				atomic_t *status, atomic_t *erp_count, u64 wwpn,
+				u32 d_id, u64 fcp_lun)
 {
-	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(r, 0, sizeof(*r));
 	r->id = ZFCP_REC_DBF_ID_TARGET;
 	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
@@ -609,56 +562,57 @@ static void zfcp_rec_dbf_event_target(char *id2, void *ref,
 	r->u.target.d_id = d_id;
 	r->u.target.fcp_lun = fcp_lun;
 	r->u.target.erp_count = atomic_read(erp_count);
-	debug_event(adapter->rec_dbf, 3, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+	debug_event(dbf->rec, 3, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
 /**
- * zfcp_rec_dbf_event_adapter - trace event for adapter state change
+ * zfcp_dbf_rec_adapter - trace event for adapter state change
  * @id: identifier for trigger of state change
  * @ref: additional reference (e.g. request)
- * @adapter: adapter
+ * @dbf: reference to dbf structure
  */
-void zfcp_rec_dbf_event_adapter(char *id, void *ref,
-				struct zfcp_adapter *adapter)
+void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
 {
-	zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status,
-				  &adapter->erp_counter, 0, 0, 0);
+	struct zfcp_adapter *adapter = dbf->adapter;
+
+	zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
+			    &adapter->erp_counter, 0, 0, 0);
 }
 
 /**
- * zfcp_rec_dbf_event_port - trace event for port state change
+ * zfcp_dbf_rec_port - trace event for port state change
  * @id: identifier for trigger of state change
  * @ref: additional reference (e.g. request)
  * @port: port
  */
-void zfcp_rec_dbf_event_port(char *id, void *ref, struct zfcp_port *port)
+void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
 {
-	struct zfcp_adapter *adapter = port->adapter;
+	struct zfcp_dbf *dbf = port->adapter->dbf;
 
-	zfcp_rec_dbf_event_target(id, ref, adapter, &port->status,
-				  &port->erp_counter, port->wwpn, port->d_id,
-				  0);
+	zfcp_dbf_rec_target(id, ref, dbf, &port->status,
+			    &port->erp_counter, port->wwpn, port->d_id,
+			    0);
 }
 
 /**
- * zfcp_rec_dbf_event_unit - trace event for unit state change
+ * zfcp_dbf_rec_unit - trace event for unit state change
  * @id: identifier for trigger of state change
  * @ref: additional reference (e.g. request)
  * @unit: unit
  */
-void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit)
+void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit)
 {
 	struct zfcp_port *port = unit->port;
-	struct zfcp_adapter *adapter = port->adapter;
+	struct zfcp_dbf *dbf = port->adapter->dbf;
 
-	zfcp_rec_dbf_event_target(id, ref, adapter, &unit->status,
-				  &unit->erp_counter, port->wwpn, port->d_id,
-				  unit->fcp_lun);
+	zfcp_dbf_rec_target(id, ref, dbf, &unit->status,
+			    &unit->erp_counter, port->wwpn, port->d_id,
+			    unit->fcp_lun);
 }
 
 /**
- * zfcp_rec_dbf_event_trigger - trace event for triggered error recovery
+ * zfcp_dbf_rec_trigger - trace event for triggered error recovery
  * @id2: identifier for error recovery trigger
  * @ref: additional reference (e.g. request)
  * @want: originally requested error recovery action
@@ -668,14 +622,15 @@ void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit)
  * @port: port
  * @unit: unit
  */
-void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need,
-				void *action, struct zfcp_adapter *adapter,
-				struct zfcp_port *port, struct zfcp_unit *unit)
+void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
+			  struct zfcp_adapter *adapter, struct zfcp_port *port,
+			  struct zfcp_unit *unit)
 {
-	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(r, 0, sizeof(*r));
 	r->id = ZFCP_REC_DBF_ID_TRIGGER;
 	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
@@ -692,22 +647,22 @@ void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need,
 		r->u.trigger.us = atomic_read(&unit->status);
 		r->u.trigger.fcp_lun = unit->fcp_lun;
 	}
-	debug_event(adapter->rec_dbf, action ? 1 : 4, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+	debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
 /**
- * zfcp_rec_dbf_event_action - trace event showing progress of recovery action
+ * zfcp_dbf_rec_action - trace event showing progress of recovery action
  * @id2: identifier
  * @erp_action: error recovery action struct pointer
  */
-void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action)
+void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
 {
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	struct zfcp_dbf *dbf = erp_action->adapter->dbf;
+	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(r, 0, sizeof(*r));
 	r->id = ZFCP_REC_DBF_ID_ACTION;
 	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
@@ -715,26 +670,27 @@ void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action)
 	r->u.action.status = erp_action->status;
 	r->u.action.step = erp_action->step;
 	r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
-	debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+	debug_event(dbf->rec, 5, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
 /**
- * zfcp_san_dbf_event_ct_request - trace event for issued CT request
+ * zfcp_dbf_san_ct_request - trace event for issued CT request
  * @fsf_req: request containing issued CT data
  */
-void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
 	struct zfcp_wka_port *wka_port = ct->wka_port;
 	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_dbf *dbf = adapter->dbf;
 	struct ct_hdr *hdr = sg_virt(ct->req);
-	struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
-	struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
+	struct zfcp_dbf_san_record *r = &dbf->san_buf;
+	struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req;
 	int level = 3;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->san_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
 	r->fsf_reqid = fsf_req->req_id;
@@ -749,28 +705,29 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
 	oct->max_res_size = hdr->max_res_size;
 	oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
 		       ZFCP_DBF_SAN_MAX_PAYLOAD);
-	debug_event(adapter->san_dbf, level, r, sizeof(*r));
-	zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level,
+	debug_event(dbf->san, level, r, sizeof(*r));
+	zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
 			 (void *)hdr + sizeof(struct ct_hdr), oct->len);
-	spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
 /**
- * zfcp_san_dbf_event_ct_response - trace event for completion of CT request
+ * zfcp_dbf_san_ct_response - trace event for completion of CT request
  * @fsf_req: request containing CT response
  */
-void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
 	struct zfcp_wka_port *wka_port = ct->wka_port;
 	struct zfcp_adapter *adapter = wka_port->adapter;
 	struct ct_hdr *hdr = sg_virt(ct->resp);
-	struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
-	struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_san_record *r = &dbf->san_buf;
+	struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp;
 	int level = 3;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->san_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
 	r->fsf_reqid = fsf_req->req_id;
@@ -785,22 +742,22 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
 	rct->max_res_size = hdr->max_res_size;
 	rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
 		       ZFCP_DBF_SAN_MAX_PAYLOAD);
-	debug_event(adapter->san_dbf, level, r, sizeof(*r));
-	zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level,
+	debug_event(dbf->san, level, r, sizeof(*r));
+	zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
 			 (void *)hdr + sizeof(struct ct_hdr), rct->len);
-	spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
-static void zfcp_san_dbf_event_els(const char *tag, int level,
-				   struct zfcp_fsf_req *fsf_req, u32 s_id,
-				   u32 d_id, u8 ls_code, void *buffer,
-				   int buflen)
+static void zfcp_dbf_san_els(const char *tag, int level,
+			     struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id,
+			     u8 ls_code, void *buffer, int buflen)
 {
 	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_san_record *rec = &dbf->san_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->san_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 	strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
 	rec->fsf_reqid = fsf_req->req_id;
@@ -808,45 +765,45 @@ static void zfcp_san_dbf_event_els(const char *tag, int level,
808 rec->s_id = s_id; 765 rec->s_id = s_id;
809 rec->d_id = d_id; 766 rec->d_id = d_id;
810 rec->u.els.ls_code = ls_code; 767 rec->u.els.ls_code = ls_code;
811 debug_event(adapter->san_dbf, level, rec, sizeof(*rec)); 768 debug_event(dbf->san, level, rec, sizeof(*rec));
812 zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level, 769 zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level,
813 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); 770 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
814 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 771 spin_unlock_irqrestore(&dbf->san_lock, flags);
815} 772}
816 773
817/** 774/**
818 * zfcp_san_dbf_event_els_request - trace event for issued ELS 775 * zfcp_dbf_san_els_request - trace event for issued ELS
819 * @fsf_req: request containing issued ELS 776 * @fsf_req: request containing issued ELS
820 */ 777 */
821void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) 778void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
822{ 779{
823 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 780 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
824 781
825 zfcp_san_dbf_event_els("oels", 2, fsf_req, 782 zfcp_dbf_san_els("oels", 2, fsf_req,
826 fc_host_port_id(els->adapter->scsi_host), 783 fc_host_port_id(els->adapter->scsi_host),
827 els->d_id, *(u8 *) sg_virt(els->req), 784 els->d_id, *(u8 *) sg_virt(els->req),
828 sg_virt(els->req), els->req->length); 785 sg_virt(els->req), els->req->length);
829} 786}
830 787
831/** 788/**
832 * zfcp_san_dbf_event_els_response - trace event for completed ELS 789 * zfcp_dbf_san_els_response - trace event for completed ELS
833 * @fsf_req: request containing ELS response 790 * @fsf_req: request containing ELS response
834 */ 791 */
835void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) 792void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
836{ 793{
837 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 794 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
838 795
839 zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id, 796 zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id,
840 fc_host_port_id(els->adapter->scsi_host), 797 fc_host_port_id(els->adapter->scsi_host),
841 *(u8 *)sg_virt(els->req), sg_virt(els->resp), 798 *(u8 *)sg_virt(els->req), sg_virt(els->resp),
842 els->resp->length); 799 els->resp->length);
843} 800}
844 801
845/** 802/**
 846 * zfcp_san_dbf_event_incoming_els - trace event for incoming ELS 803 * zfcp_dbf_san_incoming_els - trace event for incoming ELS
847 * @fsf_req: request containing unsolicited status buffer with incoming ELS 804 * @fsf_req: request containing unsolicited status buffer with incoming ELS
848 */ 805 */
849void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) 806void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req)
850{ 807{
851 struct zfcp_adapter *adapter = fsf_req->adapter; 808 struct zfcp_adapter *adapter = fsf_req->adapter;
852 struct fsf_status_read_buffer *buf = 809 struct fsf_status_read_buffer *buf =
@@ -854,16 +811,16 @@ void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
854 int length = (int)buf->length - 811 int length = (int)buf->length -
855 (int)((void *)&buf->payload - (void *)buf); 812 (int)((void *)&buf->payload - (void *)buf);
856 813
857 zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id, 814 zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id,
858 fc_host_port_id(adapter->scsi_host), 815 fc_host_port_id(adapter->scsi_host),
859 buf->payload.data[0], (void *)buf->payload.data, 816 buf->payload.data[0], (void *)buf->payload.data,
860 length); 817 length);
861} 818}
862 819
863static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, 820static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
864 char *out_buf, const char *in_buf) 821 char *out_buf, const char *in_buf)
865{ 822{
866 struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf; 823 struct zfcp_dbf_san_record *r = (struct zfcp_dbf_san_record *)in_buf;
867 char *p = out_buf; 824 char *p = out_buf;
868 825
869 if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) 826 if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
@@ -876,7 +833,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
876 zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id); 833 zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id);
877 834
878 if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { 835 if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
879 struct zfcp_san_dbf_record_ct_request *ct = &r->u.ct_req; 836 struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req;
880 zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); 837 zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
881 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); 838 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
882 zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); 839 zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
@@ -884,7 +841,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
884 zfcp_dbf_out(&p, "options", "0x%02x", ct->options); 841 zfcp_dbf_out(&p, "options", "0x%02x", ct->options);
885 zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); 842 zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
886 } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { 843 } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
887 struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp; 844 struct zfcp_dbf_san_record_ct_response *ct = &r->u.ct_resp;
888 zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code); 845 zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code);
889 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); 846 zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
890 zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code); 847 zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
@@ -894,35 +851,30 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
894 } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || 851 } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
895 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || 852 strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
896 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { 853 strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
897 struct zfcp_san_dbf_record_els *els = &r->u.els; 854 struct zfcp_dbf_san_record_els *els = &r->u.els;
898 zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code); 855 zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code);
899 } 856 }
900 return p - out_buf; 857 return p - out_buf;
901} 858}
902 859
903static struct debug_view zfcp_san_dbf_view = { 860static struct debug_view zfcp_dbf_san_view = {
904 "structured", 861 .name = "structured",
905 NULL, 862 .header_proc = zfcp_dbf_view_header,
906 &zfcp_dbf_view_header, 863 .format_proc = zfcp_dbf_san_view_format,
907 &zfcp_san_dbf_view_format,
908 NULL,
909 NULL
910}; 864};
911 865
912static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, 866void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
913 struct zfcp_adapter *adapter, 867 struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd,
914 struct scsi_cmnd *scsi_cmnd, 868 struct zfcp_fsf_req *fsf_req, unsigned long old_req_id)
915 struct zfcp_fsf_req *fsf_req,
916 unsigned long old_req_id)
917{ 869{
918 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; 870 struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf;
919 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 871 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
920 unsigned long flags; 872 unsigned long flags;
921 struct fcp_rsp_iu *fcp_rsp; 873 struct fcp_rsp_iu *fcp_rsp;
922 char *fcp_rsp_info = NULL, *fcp_sns_info = NULL; 874 char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
923 int offset = 0, buflen = 0; 875 int offset = 0, buflen = 0;
924 876
925 spin_lock_irqsave(&adapter->scsi_dbf_lock, flags); 877 spin_lock_irqsave(&dbf->scsi_lock, flags);
926 do { 878 do {
927 memset(rec, 0, sizeof(*rec)); 879 memset(rec, 0, sizeof(*rec));
928 if (offset == 0) { 880 if (offset == 0) {
@@ -976,68 +928,20 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
976 dump->offset = offset; 928 dump->offset = offset;
977 dump->size = min(buflen - offset, 929 dump->size = min(buflen - offset,
978 (int)sizeof(struct 930 (int)sizeof(struct
979 zfcp_scsi_dbf_record) - 931 zfcp_dbf_scsi_record) -
980 (int)sizeof(struct zfcp_dbf_dump)); 932 (int)sizeof(struct zfcp_dbf_dump));
981 memcpy(dump->data, fcp_sns_info + offset, dump->size); 933 memcpy(dump->data, fcp_sns_info + offset, dump->size);
982 offset += dump->size; 934 offset += dump->size;
983 } 935 }
984 debug_event(adapter->scsi_dbf, level, rec, sizeof(*rec)); 936 debug_event(dbf->scsi, level, rec, sizeof(*rec));
985 } while (offset < buflen); 937 } while (offset < buflen);
986 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); 938 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
987}
988
989/**
990 * zfcp_scsi_dbf_event_result - trace event for SCSI command completion
991 * @tag: tag indicating success or failure of SCSI command
992 * @level: trace level applicable for this event
993 * @adapter: adapter that has been used to issue the SCSI command
994 * @scsi_cmnd: SCSI command pointer
995 * @fsf_req: request used to issue SCSI command (might be NULL)
996 */
997void zfcp_scsi_dbf_event_result(const char *tag, int level,
998 struct zfcp_adapter *adapter,
999 struct scsi_cmnd *scsi_cmnd,
1000 struct zfcp_fsf_req *fsf_req)
1001{
1002 zfcp_scsi_dbf_event("rslt", tag, level, adapter, scsi_cmnd, fsf_req, 0);
1003} 939}
1004 940
1005/** 941static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view,
1006 * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort
1007 * @tag: tag indicating success or failure of abort operation
1008 * @adapter: adapter that has been used to issue SCSI command to be aborted
1009 * @scsi_cmnd: SCSI command to be aborted
1010 * @new_fsf_req: request containing abort (might be NULL)
1011 * @old_req_id: identifier of request containing SCSI command to be aborted
1012 */
1013void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
1014 struct scsi_cmnd *scsi_cmnd,
1015 struct zfcp_fsf_req *new_fsf_req,
1016 unsigned long old_req_id)
1017{
1018 zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scsi_cmnd, new_fsf_req,
1019 old_req_id);
1020}
1021
1022/**
1023 * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset
1024 * @tag: tag indicating success or failure of reset operation
1025 * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
1026 * @unit: unit that needs reset
1027 * @scsi_cmnd: SCSI command which caused this error recovery
1028 */
1029void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag,
1030 struct zfcp_unit *unit,
1031 struct scsi_cmnd *scsi_cmnd)
1032{
1033 zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1,
1034 unit->port->adapter, scsi_cmnd, NULL, 0);
1035}
1036
1037static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view,
1038 char *out_buf, const char *in_buf) 942 char *out_buf, const char *in_buf)
1039{ 943{
1040 struct zfcp_scsi_dbf_record *r = (struct zfcp_scsi_dbf_record *)in_buf; 944 struct zfcp_dbf_scsi_record *r = (struct zfcp_dbf_scsi_record *)in_buf;
1041 struct timespec t; 945 struct timespec t;
1042 char *p = out_buf; 946 char *p = out_buf;
1043 947
@@ -1059,7 +963,7 @@ static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view,
1059 zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid); 963 zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid);
1060 zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); 964 zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
1061 zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); 965 zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
1062 zfcp_dbf_timestamp(r->fsf_issued, &t); 966 stck_to_timespec(r->fsf_issued, &t);
1063 zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); 967 zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
1064 968
1065 if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) { 969 if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
@@ -1078,84 +982,96 @@ static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view,
1078 return p - out_buf; 982 return p - out_buf;
1079} 983}
1080 984
1081static struct debug_view zfcp_scsi_dbf_view = { 985static struct debug_view zfcp_dbf_scsi_view = {
1082 "structured", 986 .name = "structured",
1083 NULL, 987 .header_proc = zfcp_dbf_view_header,
1084 &zfcp_dbf_view_header, 988 .format_proc = zfcp_dbf_scsi_view_format,
1085 &zfcp_scsi_dbf_view_format,
1086 NULL,
1087 NULL
1088}; 989};
1089 990
991static debug_info_t *zfcp_dbf_reg(const char *name, int level,
992 struct debug_view *view, int size)
993{
994 struct debug_info *d;
995
996 d = debug_register(name, dbfsize, level, size);
997 if (!d)
998 return NULL;
999
1000 debug_register_view(d, &debug_hex_ascii_view);
1001 debug_register_view(d, view);
1002 debug_set_level(d, level);
1003
1004 return d;
1005}
1006
1090/** 1007/**
1091 * zfcp_adapter_debug_register - registers debug feature for an adapter 1008 * zfcp_dbf_adapter_register - registers debug feature for an adapter
1092 * @adapter: pointer to adapter for which debug features should be registered 1009 * @adapter: pointer to adapter for which debug features should be registered
1093 * return: -ENOMEM on error, 0 otherwise 1010 * return: -ENOMEM on error, 0 otherwise
1094 */ 1011 */
1095int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) 1012int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
1096{ 1013{
1097 char dbf_name[DEBUG_MAX_NAME_LEN]; 1014 char dbf_name[DEBUG_MAX_NAME_LEN];
1015 struct zfcp_dbf *dbf;
1016
1017 dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
1018 if (!dbf)
1019 return -ENOMEM;
1020
1021 dbf->adapter = adapter;
1022
1023 spin_lock_init(&dbf->hba_lock);
1024 spin_lock_init(&dbf->san_lock);
1025 spin_lock_init(&dbf->scsi_lock);
1026 spin_lock_init(&dbf->rec_lock);
1098 1027
1099 /* debug feature area which records recovery activity */ 1028 /* debug feature area which records recovery activity */
1100 sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); 1029 sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
1101 adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1, 1030 dbf->rec = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_rec_view,
1102 sizeof(struct zfcp_rec_dbf_record)); 1031 sizeof(struct zfcp_dbf_rec_record));
1103 if (!adapter->rec_dbf) 1032 if (!dbf->rec)
1104 goto failed; 1033 goto err_out;
1105 debug_register_view(adapter->rec_dbf, &debug_hex_ascii_view);
1106 debug_register_view(adapter->rec_dbf, &zfcp_rec_dbf_view);
1107 debug_set_level(adapter->rec_dbf, 3);
1108 1034
1109 /* debug feature area which records HBA (FSF and QDIO) conditions */ 1035 /* debug feature area which records HBA (FSF and QDIO) conditions */
1110 sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); 1036 sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
1111 adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1, 1037 dbf->hba = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view,
1112 sizeof(struct zfcp_hba_dbf_record)); 1038 sizeof(struct zfcp_dbf_hba_record));
1113 if (!adapter->hba_dbf) 1039 if (!dbf->hba)
1114 goto failed; 1040 goto err_out;
1115 debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view);
1116 debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view);
1117 debug_set_level(adapter->hba_dbf, 3);
1118 1041
1119 /* debug feature area which records SAN command failures and recovery */ 1042 /* debug feature area which records SAN command failures and recovery */
1120 sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); 1043 sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
1121 adapter->san_dbf = debug_register(dbf_name, dbfsize, 1, 1044 dbf->san = zfcp_dbf_reg(dbf_name, 6, &zfcp_dbf_san_view,
1122 sizeof(struct zfcp_san_dbf_record)); 1045 sizeof(struct zfcp_dbf_san_record));
1123 if (!adapter->san_dbf) 1046 if (!dbf->san)
1124 goto failed; 1047 goto err_out;
1125 debug_register_view(adapter->san_dbf, &debug_hex_ascii_view);
1126 debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view);
1127 debug_set_level(adapter->san_dbf, 6);
1128 1048
1129 /* debug feature area which records SCSI command failures and recovery */ 1049 /* debug feature area which records SCSI command failures and recovery */
1130 sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); 1050 sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
1131 adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1, 1051 dbf->scsi = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_scsi_view,
1132 sizeof(struct zfcp_scsi_dbf_record)); 1052 sizeof(struct zfcp_dbf_scsi_record));
1133 if (!adapter->scsi_dbf) 1053 if (!dbf->scsi)
1134 goto failed; 1054 goto err_out;
1135 debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view);
1136 debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view);
1137 debug_set_level(adapter->scsi_dbf, 3);
1138 1055
1056 adapter->dbf = dbf;
1139 return 0; 1057 return 0;
1140 1058
1141 failed: 1059err_out:
1142 zfcp_adapter_debug_unregister(adapter); 1060 zfcp_dbf_adapter_unregister(dbf);
1143
1144 return -ENOMEM; 1061 return -ENOMEM;
1145} 1062}
1146 1063
1147/** 1064/**
1148 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter 1065 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
1149 * @adapter: pointer to adapter for which debug features should be unregistered 1066 * @dbf: pointer to dbf for which debug features should be unregistered
1150 */ 1067 */
1151void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter) 1068void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf)
1152{ 1069{
1153 debug_unregister(adapter->scsi_dbf); 1070 debug_unregister(dbf->scsi);
1154 debug_unregister(adapter->san_dbf); 1071 debug_unregister(dbf->san);
1155 debug_unregister(adapter->hba_dbf); 1072 debug_unregister(dbf->hba);
1156 debug_unregister(adapter->rec_dbf); 1073 debug_unregister(dbf->rec);
1157 adapter->scsi_dbf = NULL; 1074 dbf->adapter->dbf = NULL;
1158 adapter->san_dbf = NULL; 1075 kfree(dbf);
1159 adapter->hba_dbf = NULL;
1160 adapter->rec_dbf = NULL;
1161} 1076}
1077
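
The four hand-rolled register/view/level blocks in the left column (rec, hba, san, scsi) collapse into the single zfcp_dbf_reg() helper on the right. For readers unfamiliar with the s390 debug feature, here is a minimal sketch of the same registration pattern in isolation; the area name, record struct and page count are illustrative, not taken from zfcp:

    #include <linux/types.h>
    #include <asm/debug.h>

    struct my_record {                  /* illustrative trace record */
            u8  tag[4];
            u64 req_id;
    } __attribute__ ((packed));

    static debug_info_t *my_dbf_setup(void)
    {
            debug_info_t *d;

            /* 4 pages per area, 1 area, record size as buffer size */
            d = debug_register("my_area", 4, 1, sizeof(struct my_record));
            if (!d)
                    return NULL;

            /* a hex/ascii view is always useful for raw records */
            debug_register_view(d, &debug_hex_ascii_view);
            debug_set_level(d, 3);      /* emit events with level <= 3 */
            return d;
    }

Records are then emitted with debug_event(d, level, &rec, sizeof(rec)) and the area is torn down with debug_unregister(d), exactly as the per-area zfcp code above does.
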
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index a573f7344dd6..6b1461e8f847 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
2 * This file is part of the zfcp device driver for 2 * This file is part of the zfcp device driver for
3 * FCP adapters for IBM System z9 and zSeries. 3 * FCP adapters for IBM System z9 and zSeries.
4 * 4 *
5 * Copyright IBM Corp. 2008, 2008 5 * Copyright IBM Corp. 2008, 2009
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -22,7 +22,9 @@
22#ifndef ZFCP_DBF_H 22#ifndef ZFCP_DBF_H
23#define ZFCP_DBF_H 23#define ZFCP_DBF_H
24 24
25#include "zfcp_ext.h"
25#include "zfcp_fsf.h" 26#include "zfcp_fsf.h"
27#include "zfcp_def.h"
26 28
27#define ZFCP_DBF_TAG_SIZE 4 29#define ZFCP_DBF_TAG_SIZE 4
28#define ZFCP_DBF_ID_SIZE 7 30#define ZFCP_DBF_ID_SIZE 7
@@ -35,13 +37,13 @@ struct zfcp_dbf_dump {
35 u8 data[]; /* dump data */ 37 u8 data[]; /* dump data */
36} __attribute__ ((packed)); 38} __attribute__ ((packed));
37 39
38struct zfcp_rec_dbf_record_thread { 40struct zfcp_dbf_rec_record_thread {
39 u32 total; 41 u32 total;
40 u32 ready; 42 u32 ready;
41 u32 running; 43 u32 running;
42}; 44};
43 45
44struct zfcp_rec_dbf_record_target { 46struct zfcp_dbf_rec_record_target {
45 u64 ref; 47 u64 ref;
46 u32 status; 48 u32 status;
47 u32 d_id; 49 u32 d_id;
@@ -50,7 +52,7 @@ struct zfcp_rec_dbf_record_target {
50 u32 erp_count; 52 u32 erp_count;
51}; 53};
52 54
53struct zfcp_rec_dbf_record_trigger { 55struct zfcp_dbf_rec_record_trigger {
54 u8 want; 56 u8 want;
55 u8 need; 57 u8 need;
56 u32 as; 58 u32 as;
@@ -62,21 +64,21 @@ struct zfcp_rec_dbf_record_trigger {
62 u64 fcp_lun; 64 u64 fcp_lun;
63}; 65};
64 66
65struct zfcp_rec_dbf_record_action { 67struct zfcp_dbf_rec_record_action {
66 u32 status; 68 u32 status;
67 u32 step; 69 u32 step;
68 u64 action; 70 u64 action;
69 u64 fsf_req; 71 u64 fsf_req;
70}; 72};
71 73
72struct zfcp_rec_dbf_record { 74struct zfcp_dbf_rec_record {
73 u8 id; 75 u8 id;
74 char id2[7]; 76 char id2[7];
75 union { 77 union {
76 struct zfcp_rec_dbf_record_action action; 78 struct zfcp_dbf_rec_record_action action;
77 struct zfcp_rec_dbf_record_thread thread; 79 struct zfcp_dbf_rec_record_thread thread;
78 struct zfcp_rec_dbf_record_target target; 80 struct zfcp_dbf_rec_record_target target;
79 struct zfcp_rec_dbf_record_trigger trigger; 81 struct zfcp_dbf_rec_record_trigger trigger;
80 } u; 82 } u;
81}; 83};
82 84
@@ -87,7 +89,7 @@ enum {
87 ZFCP_REC_DBF_ID_TRIGGER, 89 ZFCP_REC_DBF_ID_TRIGGER,
88}; 90};
89 91
90struct zfcp_hba_dbf_record_response { 92struct zfcp_dbf_hba_record_response {
91 u32 fsf_command; 93 u32 fsf_command;
92 u64 fsf_reqid; 94 u64 fsf_reqid;
93 u32 fsf_seqno; 95 u32 fsf_seqno;
@@ -125,7 +127,7 @@ struct zfcp_hba_dbf_record_response {
125 } u; 127 } u;
126} __attribute__ ((packed)); 128} __attribute__ ((packed));
127 129
128struct zfcp_hba_dbf_record_status { 130struct zfcp_dbf_hba_record_status {
129 u8 failed; 131 u8 failed;
130 u32 status_type; 132 u32 status_type;
131 u32 status_subtype; 133 u32 status_subtype;
@@ -139,24 +141,24 @@ struct zfcp_hba_dbf_record_status {
139 u8 payload[ZFCP_DBF_UNSOL_PAYLOAD]; 141 u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
140} __attribute__ ((packed)); 142} __attribute__ ((packed));
141 143
142struct zfcp_hba_dbf_record_qdio { 144struct zfcp_dbf_hba_record_qdio {
143 u32 qdio_error; 145 u32 qdio_error;
144 u8 sbal_index; 146 u8 sbal_index;
145 u8 sbal_count; 147 u8 sbal_count;
146} __attribute__ ((packed)); 148} __attribute__ ((packed));
147 149
148struct zfcp_hba_dbf_record { 150struct zfcp_dbf_hba_record {
149 u8 tag[ZFCP_DBF_TAG_SIZE]; 151 u8 tag[ZFCP_DBF_TAG_SIZE];
150 u8 tag2[ZFCP_DBF_TAG_SIZE]; 152 u8 tag2[ZFCP_DBF_TAG_SIZE];
151 union { 153 union {
152 struct zfcp_hba_dbf_record_response response; 154 struct zfcp_dbf_hba_record_response response;
153 struct zfcp_hba_dbf_record_status status; 155 struct zfcp_dbf_hba_record_status status;
154 struct zfcp_hba_dbf_record_qdio qdio; 156 struct zfcp_dbf_hba_record_qdio qdio;
155 struct fsf_bit_error_payload berr; 157 struct fsf_bit_error_payload berr;
156 } u; 158 } u;
157} __attribute__ ((packed)); 159} __attribute__ ((packed));
158 160
159struct zfcp_san_dbf_record_ct_request { 161struct zfcp_dbf_san_record_ct_request {
160 u16 cmd_req_code; 162 u16 cmd_req_code;
161 u8 revision; 163 u8 revision;
162 u8 gs_type; 164 u8 gs_type;
@@ -166,7 +168,7 @@ struct zfcp_san_dbf_record_ct_request {
166 u32 len; 168 u32 len;
167} __attribute__ ((packed)); 169} __attribute__ ((packed));
168 170
169struct zfcp_san_dbf_record_ct_response { 171struct zfcp_dbf_san_record_ct_response {
170 u16 cmd_rsp_code; 172 u16 cmd_rsp_code;
171 u8 revision; 173 u8 revision;
172 u8 reason_code; 174 u8 reason_code;
@@ -176,27 +178,27 @@ struct zfcp_san_dbf_record_ct_response {
176 u32 len; 178 u32 len;
177} __attribute__ ((packed)); 179} __attribute__ ((packed));
178 180
179struct zfcp_san_dbf_record_els { 181struct zfcp_dbf_san_record_els {
180 u8 ls_code; 182 u8 ls_code;
181 u32 len; 183 u32 len;
182} __attribute__ ((packed)); 184} __attribute__ ((packed));
183 185
184struct zfcp_san_dbf_record { 186struct zfcp_dbf_san_record {
185 u8 tag[ZFCP_DBF_TAG_SIZE]; 187 u8 tag[ZFCP_DBF_TAG_SIZE];
186 u64 fsf_reqid; 188 u64 fsf_reqid;
187 u32 fsf_seqno; 189 u32 fsf_seqno;
188 u32 s_id; 190 u32 s_id;
189 u32 d_id; 191 u32 d_id;
190 union { 192 union {
191 struct zfcp_san_dbf_record_ct_request ct_req; 193 struct zfcp_dbf_san_record_ct_request ct_req;
192 struct zfcp_san_dbf_record_ct_response ct_resp; 194 struct zfcp_dbf_san_record_ct_response ct_resp;
193 struct zfcp_san_dbf_record_els els; 195 struct zfcp_dbf_san_record_els els;
194 } u; 196 } u;
195#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 197#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
196 u8 payload[32]; 198 u8 payload[32];
197} __attribute__ ((packed)); 199} __attribute__ ((packed));
198 200
199struct zfcp_scsi_dbf_record { 201struct zfcp_dbf_scsi_record {
200 u8 tag[ZFCP_DBF_TAG_SIZE]; 202 u8 tag[ZFCP_DBF_TAG_SIZE];
201 u8 tag2[ZFCP_DBF_TAG_SIZE]; 203 u8 tag2[ZFCP_DBF_TAG_SIZE];
202 u32 scsi_id; 204 u32 scsi_id;
@@ -222,4 +224,127 @@ struct zfcp_scsi_dbf_record {
222 u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO]; 224 u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
223} __attribute__ ((packed)); 225} __attribute__ ((packed));
224 226
227struct zfcp_dbf {
228 debug_info_t *rec;
229 debug_info_t *hba;
230 debug_info_t *san;
231 debug_info_t *scsi;
232 spinlock_t rec_lock;
233 spinlock_t hba_lock;
234 spinlock_t san_lock;
235 spinlock_t scsi_lock;
236 struct zfcp_dbf_rec_record rec_buf;
237 struct zfcp_dbf_hba_record hba_buf;
238 struct zfcp_dbf_san_record san_buf;
239 struct zfcp_dbf_scsi_record scsi_buf;
240 struct zfcp_adapter *adapter;
241};
242
243static inline
244void zfcp_dbf_hba_fsf_resp(const char *tag2, int level,
245 struct zfcp_fsf_req *req, struct zfcp_dbf *dbf)
246{
247 if (level <= dbf->hba->level)
248 _zfcp_dbf_hba_fsf_response(tag2, level, req, dbf);
249}
250
251/**
252 * zfcp_dbf_hba_fsf_response - trace event for request completion
253 * @req: request that has been completed
254 */
255static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
256{
257 struct zfcp_dbf *dbf = req->adapter->dbf;
258 struct fsf_qtcb *qtcb = req->qtcb;
259
260 if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
261 (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
262 zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf);
263
264 } else if (qtcb->header.fsf_status != FSF_GOOD) {
265 zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf);
266
267 } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
268 (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
269 zfcp_dbf_hba_fsf_resp("open", 4, req, dbf);
270
271 } else if (qtcb->header.log_length) {
272 zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf);
273
274 } else {
275 zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf);
276 }
277 }
278
279/**
280 * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer
281 * @tag: tag indicating which kind of unsolicited status has been received
282 * @dbf: reference to dbf structure
283 * @buf: buffer containing payload of unsolicited status
284 */
285static inline
286void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf,
287 struct fsf_status_read_buffer *buf)
288{
289 int level = 2;
290
291 if (level <= dbf->hba->level)
292 _zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf);
293}
294
295static inline
296void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
297 struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
298 struct zfcp_fsf_req *req, unsigned long old_id)
299{
300 if (level <= dbf->scsi->level)
301 _zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id);
302}
303
304/**
305 * zfcp_dbf_scsi_result - trace event for SCSI command completion
306 * @tag: tag indicating success or failure of SCSI command
307 * @level: trace level applicable for this event
308 * @adapter: adapter that has been used to issue the SCSI command
309 * @scmd: SCSI command pointer
310 * @fsf_req: request used to issue SCSI command (might be NULL)
311 */
312static inline
313void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf,
314 struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req)
315{
316 zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0);
317}
318
319/**
320 * zfcp_dbf_scsi_abort - trace event for SCSI command abort
321 * @tag: tag indicating success or failure of abort operation
322 * @dbf: dbf of the adapter that has been used to issue the SCSI command to be aborted
323 * @scmd: SCSI command to be aborted
324 * @new_req: request containing abort (might be NULL)
325 * @old_id: identifier of request containing SCSI command to be aborted
326 */
327static inline
328void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
329 struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req,
330 unsigned long old_id)
331{
332 zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id);
333}
334
335/**
336 * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
337 * @tag: tag indicating success or failure of reset operation
338 * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
339 * @unit: unit that needs reset
340 * @scsi_cmnd: SCSI command which caused this error recovery
341 */
342static inline
343void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
344 struct scsi_cmnd *scsi_cmnd)
345{
346 zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1,
347 unit->port->adapter->dbf, scsi_cmnd, NULL, 0);
348}
349
225#endif /* ZFCP_DBF_H */ 350#endif /* ZFCP_DBF_H */
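
The new inline wrappers in this header (zfcp_dbf_hba_fsf_resp, zfcp_dbf_scsi, ...) all follow one pattern: compare the event level against the debug area's current level before calling the slow path that takes a spinlock and assembles the record. A reduced sketch of that gate, with all names illustrative:

    #include <asm/debug.h>

    struct my_dbf { debug_info_t *area; };      /* illustrative container */

    /* slow path: locks the per-area buffer and emits the record */
    void _my_trace(const char *tag, int level, struct my_dbf *dbf);

    static inline void my_trace(const char *tag, int level,
                                struct my_dbf *dbf)
    {
            /* debug_info_t::level holds the area's current trace level */
            if (level <= dbf->area->level)
                    _my_trace(tag, level, dbf);
    }

This keeps filtered-out events down to a single compare instead of a spinlock round trip, which matters on the SCSI completion path.
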
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 49d0532bca1c..7da2fad8f515 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -37,10 +37,8 @@
37#include <asm/debug.h> 37#include <asm/debug.h>
38#include <asm/ebcdic.h> 38#include <asm/ebcdic.h>
39#include <asm/sysinfo.h> 39#include <asm/sysinfo.h>
40#include "zfcp_dbf.h"
41#include "zfcp_fsf.h" 40#include "zfcp_fsf.h"
42 41
43
44/********************* GENERAL DEFINES *********************************/ 42/********************* GENERAL DEFINES *********************************/
45 43
46#define REQUEST_LIST_SIZE 128 44#define REQUEST_LIST_SIZE 128
@@ -75,9 +73,6 @@
75 73
76/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ 74/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
77 75
78/* timeout for name-server lookup (in seconds) */
79#define ZFCP_NS_GID_PN_TIMEOUT 10
80
81/* task attribute values in FCP-2 FCP_CMND IU */ 76/* task attribute values in FCP-2 FCP_CMND IU */
82#define SIMPLE_Q 0 77#define SIMPLE_Q 0
83#define HEAD_OF_Q 1 78#define HEAD_OF_Q 1
@@ -224,8 +219,6 @@ struct zfcp_ls_adisc {
224#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 219#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
225#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 220#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
226#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 221#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
227#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
228#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
229#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 222#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
230#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 223#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
231 224
@@ -234,6 +227,7 @@ struct zfcp_ls_adisc {
234 227
235/* remote port status */ 228/* remote port status */
236#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 229#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
230#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
237 231
238/* well known address (WKA) port status*/ 232/* well known address (WKA) port status*/
239enum zfcp_wka_status { 233enum zfcp_wka_status {
@@ -249,7 +243,6 @@ enum zfcp_wka_status {
249 243
250/* FSF request status (this does not have a common part) */ 244/* FSF request status (this does not have a common part) */
251#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 245#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
252#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
253#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 246#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
254#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 247#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
255#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 248#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
@@ -266,12 +259,14 @@ struct zfcp_fsf_req;
266 259
267/* holds various memory pools of an adapter */ 260/* holds various memory pools of an adapter */
268struct zfcp_adapter_mempool { 261struct zfcp_adapter_mempool {
269 mempool_t *fsf_req_erp; 262 mempool_t *erp_req;
270 mempool_t *fsf_req_scsi; 263 mempool_t *gid_pn_req;
271 mempool_t *fsf_req_abort; 264 mempool_t *scsi_req;
272 mempool_t *fsf_req_status_read; 265 mempool_t *scsi_abort;
273 mempool_t *data_status_read; 266 mempool_t *status_read_req;
274 mempool_t *data_gid_pn; 267 mempool_t *status_read_data;
268 mempool_t *gid_pn_data;
269 mempool_t *qtcb_pool;
275}; 270};
276 271
277/* 272/*
@@ -305,6 +300,15 @@ struct ct_iu_gid_pn_resp {
305 u32 d_id; 300 u32 d_id;
306} __attribute__ ((packed)); 301} __attribute__ ((packed));
307 302
303struct ct_iu_gpn_ft_req {
304 struct ct_hdr header;
305 u8 flags;
306 u8 domain_id_scope;
307 u8 area_id_scope;
308 u8 fc4_type;
309} __attribute__ ((packed));
310
311
308/** 312/**
309 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct 313 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
310 * @wka_port: port where the request is sent to 314 * @wka_port: port where the request is sent to
@@ -312,7 +316,6 @@ struct ct_iu_gid_pn_resp {
312 * @resp: scatter-gather list for response 316 * @resp: scatter-gather list for response
313 * @handler: handler function (called for response to the request) 317 * @handler: handler function (called for response to the request)
314 * @handler_data: data passed to handler function 318 * @handler_data: data passed to handler function
315 * @timeout: FSF timeout for this request
316 * @completion: completion for synchronization purposes 319 * @completion: completion for synchronization purposes
317 * @status: used to pass error status to calling function 320 * @status: used to pass error status to calling function
318 */ 321 */
@@ -322,7 +325,6 @@ struct zfcp_send_ct {
322 struct scatterlist *resp; 325 struct scatterlist *resp;
323 void (*handler)(unsigned long); 326 void (*handler)(unsigned long);
324 unsigned long handler_data; 327 unsigned long handler_data;
325 int timeout;
326 struct completion *completion; 328 struct completion *completion;
327 int status; 329 int status;
328}; 330};
@@ -420,6 +422,29 @@ struct zfcp_latencies {
420 spinlock_t lock; 422 spinlock_t lock;
421}; 423};
422 424
425/** struct zfcp_qdio - basic QDIO data structure
426 * @resp_q: response queue
427 * @req_q: request queue
428 * @stat_lock: lock to protect req_q_util and req_q_time
429 * @req_q_lock: lock to serialize access to request queue
430 * @req_q_time: time of last fill level change
431 * @req_q_util: used for accounting
432 * @req_q_full: queue full incidents
433 * @req_q_wq: used to wait for SBAL availability
434 * @adapter: adapter used in conjunction with this QDIO structure
435 */
436struct zfcp_qdio {
437 struct zfcp_qdio_queue resp_q;
438 struct zfcp_qdio_queue req_q;
439 spinlock_t stat_lock;
440 spinlock_t req_q_lock;
441 unsigned long long req_q_time;
442 u64 req_q_util;
443 atomic_t req_q_full;
444 wait_queue_head_t req_q_wq;
445 struct zfcp_adapter *adapter;
446};
447
423struct zfcp_adapter { 448struct zfcp_adapter {
424 atomic_t refcount; /* reference count */ 449 atomic_t refcount; /* reference count */
425 wait_queue_head_t remove_wq; /* can be used to wait for 450 wait_queue_head_t remove_wq; /* can be used to wait for
@@ -428,6 +453,7 @@ struct zfcp_adapter {
428 u64 peer_wwpn; /* P2P peer WWPN */ 453 u64 peer_wwpn; /* P2P peer WWPN */
429 u32 peer_d_id; /* P2P peer D_ID */ 454 u32 peer_d_id; /* P2P peer D_ID */
430 struct ccw_device *ccw_device; /* S/390 ccw device */ 455 struct ccw_device *ccw_device; /* S/390 ccw device */
456 struct zfcp_qdio *qdio;
431 u32 hydra_version; /* Hydra version */ 457 u32 hydra_version; /* Hydra version */
432 u32 fsf_lic_version; 458 u32 fsf_lic_version;
433 u32 adapter_features; /* FCP channel features */ 459 u32 adapter_features; /* FCP channel features */
@@ -439,15 +465,7 @@ struct zfcp_adapter {
439 unsigned long req_no; /* unique FSF req number */ 465 unsigned long req_no; /* unique FSF req number */
440 struct list_head *req_list; /* list of pending reqs */ 466 struct list_head *req_list; /* list of pending reqs */
441 spinlock_t req_list_lock; /* request list lock */ 467 spinlock_t req_list_lock; /* request list lock */
442 struct zfcp_qdio_queue req_q; /* request queue */
443 spinlock_t req_q_lock; /* for operations on queue */
444 ktime_t req_q_time; /* time of last fill level change */
445 u64 req_q_util; /* for accounting */
446 spinlock_t qdio_stat_lock;
447 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 468 u32 fsf_req_seq_no; /* FSF cmnd seq number */
448 wait_queue_head_t request_wq; /* can be used to wait for
449 more available SBALs */
450 struct zfcp_qdio_queue resp_q; /* response queue */
451 rwlock_t abort_lock; /* Protects against SCSI 469 rwlock_t abort_lock; /* Protects against SCSI
452 stack abort/command 470 stack abort/command
453 completion races */ 471 completion races */
@@ -456,10 +474,9 @@ struct zfcp_adapter {
456 atomic_t status; /* status of this adapter */ 474 atomic_t status; /* status of this adapter */
457 struct list_head erp_ready_head; /* error recovery for this 475 struct list_head erp_ready_head; /* error recovery for this
458 adapter/devices */ 476 adapter/devices */
477 wait_queue_head_t erp_ready_wq;
459 struct list_head erp_running_head; 478 struct list_head erp_running_head;
460 rwlock_t erp_lock; 479 rwlock_t erp_lock;
461 struct semaphore erp_ready_sem;
462 wait_queue_head_t erp_thread_wqh;
463 wait_queue_head_t erp_done_wqh; 480 wait_queue_head_t erp_done_wqh;
464 struct zfcp_erp_action erp_action; /* pending error recovery */ 481 struct zfcp_erp_action erp_action; /* pending error recovery */
465 atomic_t erp_counter; 482 atomic_t erp_counter;
@@ -467,27 +484,16 @@ struct zfcp_adapter {
467 actions */ 484 actions */
468 u32 erp_low_mem_count; /* nr of erp actions waiting 485 u32 erp_low_mem_count; /* nr of erp actions waiting
469 for memory */ 486 for memory */
487 struct task_struct *erp_thread;
470 struct zfcp_wka_ports *gs; /* generic services */ 488 struct zfcp_wka_ports *gs; /* generic services */
471 debug_info_t *rec_dbf; 489 struct zfcp_dbf *dbf; /* debug traces */
472 debug_info_t *hba_dbf;
473 debug_info_t *san_dbf; /* debug feature areas */
474 debug_info_t *scsi_dbf;
475 spinlock_t rec_dbf_lock;
476 spinlock_t hba_dbf_lock;
477 spinlock_t san_dbf_lock;
478 spinlock_t scsi_dbf_lock;
479 struct zfcp_rec_dbf_record rec_dbf_buf;
480 struct zfcp_hba_dbf_record hba_dbf_buf;
481 struct zfcp_san_dbf_record san_dbf_buf;
482 struct zfcp_scsi_dbf_record scsi_dbf_buf;
483 struct zfcp_adapter_mempool pool; /* Adapter memory pools */ 490 struct zfcp_adapter_mempool pool; /* Adapter memory pools */
484 struct qdio_initialize qdio_init_data; /* for qdio_establish */
485 struct fc_host_statistics *fc_stats; 491 struct fc_host_statistics *fc_stats;
486 struct fsf_qtcb_bottom_port *stats_reset_data; 492 struct fsf_qtcb_bottom_port *stats_reset_data;
487 unsigned long stats_reset; 493 unsigned long stats_reset;
488 struct work_struct scan_work; 494 struct work_struct scan_work;
489 struct service_level service_level; 495 struct service_level service_level;
490 atomic_t qdio_outb_full; /* queue full incidents */ 496 struct workqueue_struct *work_queue;
491}; 497};
492 498
493struct zfcp_port { 499struct zfcp_port {
@@ -531,36 +537,64 @@ struct zfcp_unit {
531 struct work_struct scsi_work; 537 struct work_struct scsi_work;
532}; 538};
533 539
534/* FSF request */ 540/**
541 * struct zfcp_queue_req - queue related values for a request
542 * @sbal_number: number of free SBALs
543 * @sbal_first: first SBAL for this request
544 * @sbal_last: last SBAL for this request
545 * @sbal_limit: last possible SBAL for this request
546 * @sbale_curr: current SBALE at creation of this request
547 * @sbal_response: SBAL used in interrupt
548 * @qdio_outb_usage: usage of outbound queue
549 * @qdio_inb_usage: usage of inbound queue
550 */
551struct zfcp_queue_req {
552 u8 sbal_number;
553 u8 sbal_first;
554 u8 sbal_last;
555 u8 sbal_limit;
556 u8 sbale_curr;
557 u8 sbal_response;
558 u16 qdio_outb_usage;
559 u16 qdio_inb_usage;
560};
561
562/**
563 * struct zfcp_fsf_req - basic FSF request structure
564 * @list: list of FSF requests
565 * @req_id: unique request ID
566 * @adapter: adapter this request belongs to
567 * @queue_req: queue related values
568 * @completion: used to signal the completion of the request
569 * @status: status of the request
570 * @fsf_command: FSF command issued
571 * @qtcb: associated QTCB
572 * @seq_no: sequence number of this request
573 * @data: private data
574 * @timer: timer data of this request
575 * @erp_action: reference to erp action if request issued on behalf of ERP
576 * @pool: reference to memory pool if used for this request
577 * @issued: time when request was sent (STCK)
578 * @unit: reference to unit if this request is a SCSI request
579 * @handler: handler which should be called to process response
580 */
535struct zfcp_fsf_req { 581struct zfcp_fsf_req {
536 struct list_head list; /* list of FSF requests */ 582 struct list_head list;
537 unsigned long req_id; /* unique request ID */ 583 unsigned long req_id;
538 struct zfcp_adapter *adapter; /* adapter request belongs to */ 584 struct zfcp_adapter *adapter;
539 u8 sbal_number; /* nr of SBALs free for use */ 585 struct zfcp_queue_req queue_req;
540 u8 sbal_first; /* first SBAL for this request */ 586 struct completion completion;
541 u8 sbal_last; /* last SBAL for this request */ 587 u32 status;
542 u8 sbal_limit; /* last possible SBAL for 588 u32 fsf_command;
543 this reuest */ 589 struct fsf_qtcb *qtcb;
544 u8 sbale_curr; /* current SBALE during creation 590 u32 seq_no;
545 of request */ 591 void *data;
546 u8 sbal_response; /* SBAL used in interrupt */ 592 struct timer_list timer;
547 wait_queue_head_t completion_wq; /* can be used by a routine 593 struct zfcp_erp_action *erp_action;
548 to wait for completion */ 594 mempool_t *pool;
549 u32 status; /* status of this request */ 595 unsigned long long issued;
550 u32 fsf_command; /* FSF Command copy */ 596 struct zfcp_unit *unit;
551 struct fsf_qtcb *qtcb; /* address of associated QTCB */
552 u32 seq_no; /* Sequence number of request */
553 void *data; /* private data of request */
554 struct timer_list timer; /* used for erp or scsi er */
555 struct zfcp_erp_action *erp_action; /* used if this request is
556 issued on behalf of erp */
557 mempool_t *pool; /* used if request was allocated
558 from emergency pool */
559 unsigned long long issued; /* request sent time (STCK) */
560 struct zfcp_unit *unit;
561 void (*handler)(struct zfcp_fsf_req *); 597 void (*handler)(struct zfcp_fsf_req *);
562 u16 qdio_outb_usage;/* usage of outbound queue */
563 u16 qdio_inb_usage; /* usage of inbound queue */
564}; 598};
565 599
566/* driver data */ 600/* driver data */
@@ -570,18 +604,11 @@ struct zfcp_data {
570 rwlock_t config_lock; /* serialises changes 604 rwlock_t config_lock; /* serialises changes
571 to adapter/port/unit 605 to adapter/port/unit
572 lists */ 606 lists */
573 struct semaphore config_sema; /* serialises configuration 607 struct mutex config_mutex;
574 changes */ 608 struct kmem_cache *gpn_ft_cache;
575 struct kmem_cache *fsf_req_qtcb_cache; 609 struct kmem_cache *qtcb_cache;
576 struct kmem_cache *sr_buffer_cache; 610 struct kmem_cache *sr_buffer_cache;
577 struct kmem_cache *gid_pn_cache; 611 struct kmem_cache *gid_pn_cache;
578 struct workqueue_struct *work_queue;
579};
580
581/* struct used by memory pools for fsf_requests */
582struct zfcp_fsf_req_qtcb {
583 struct zfcp_fsf_req fsf_req;
584 struct fsf_qtcb qtcb;
585}; 612};
586 613
587/********************** ZFCP SPECIFIC DEFINES ********************************/ 614/********************** ZFCP SPECIFIC DEFINES ********************************/
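
struct zfcp_adapter_mempool now names each pool after its purpose (erp_req, scsi_req, qtcb_pool, ...) and the combined zfcp_fsf_req_qtcb wrapper struct is gone in favour of a dedicated QTCB pool. A sketch of how such a slab-backed emergency pool is typically built; the struct, cache name and minimum count are illustrative, not the driver's actual values:

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct my_qtcb { u8 data[512]; };   /* illustrative payload */

    static struct kmem_cache *qtcb_cache;
    static mempool_t *qtcb_pool;

    static int qtcb_pool_setup(void)
    {
            qtcb_cache = kmem_cache_create("my_qtcb",
                                           sizeof(struct my_qtcb),
                                           0, 0, NULL);
            if (!qtcb_cache)
                    return -ENOMEM;

            /* keep 4 objects preallocated for low-memory recovery */
            qtcb_pool = mempool_create_slab_pool(4, qtcb_cache);
            if (!qtcb_pool) {
                    kmem_cache_destroy(qtcb_cache);
                    return -ENOMEM;
            }
            return 0;
    }

Objects come from mempool_alloc(qtcb_pool, GFP_KERNEL) and must be returned via mempool_free() so the reserve refills.
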
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c75d6f35cb5f..73d366ba31e5 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -9,6 +9,7 @@
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/kthread.h>
12#include "zfcp_ext.h" 13#include "zfcp_ext.h"
13 14
14#define ZFCP_MAX_ERPS 3 15#define ZFCP_MAX_ERPS 3
@@ -26,7 +27,6 @@ enum zfcp_erp_steps {
26 ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001, 27 ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
27 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, 28 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
28 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, 29 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
29 ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
30 ZFCP_ERP_STEP_PORT_OPENING = 0x0800, 30 ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
31 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, 31 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000,
32 ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, 32 ZFCP_ERP_STEP_UNIT_OPENING = 0x2000,
@@ -75,9 +75,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
75 struct zfcp_adapter *adapter = act->adapter; 75 struct zfcp_adapter *adapter = act->adapter;
76 76
77 list_move(&act->list, &act->adapter->erp_ready_head); 77 list_move(&act->list, &act->adapter->erp_ready_head);
78 zfcp_rec_dbf_event_action("erardy1", act); 78 zfcp_dbf_rec_action("erardy1", act);
79 up(&adapter->erp_ready_sem); 79 wake_up(&adapter->erp_ready_wq);
80 zfcp_rec_dbf_event_thread("erardy2", adapter); 80 zfcp_dbf_rec_thread("erardy2", adapter->dbf);
81} 81}
82 82
83static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) 83static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@@ -150,6 +150,9 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
150 a_status = atomic_read(&adapter->status); 150 a_status = atomic_read(&adapter->status);
151 if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE) 151 if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
152 return 0; 152 return 0;
153 if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) &&
154 !(a_status & ZFCP_STATUS_COMMON_OPEN))
155 return 0; /* shutdown requested for closed adapter */
153 } 156 }
154 157
155 return need; 158 return need;
@@ -213,8 +216,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
213 int retval = 1, need; 216 int retval = 1, need;
214 struct zfcp_erp_action *act = NULL; 217 struct zfcp_erp_action *act = NULL;
215 218
216 if (!(atomic_read(&adapter->status) & 219 if (!adapter->erp_thread)
217 ZFCP_STATUS_ADAPTER_ERP_THREAD_UP))
218 return -EIO; 220 return -EIO;
219 221
220 need = zfcp_erp_required_act(want, adapter, port, unit); 222 need = zfcp_erp_required_act(want, adapter, port, unit);
@@ -227,12 +229,11 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
227 goto out; 229 goto out;
228 ++adapter->erp_total_count; 230 ++adapter->erp_total_count;
229 list_add_tail(&act->list, &adapter->erp_ready_head); 231 list_add_tail(&act->list, &adapter->erp_ready_head);
230 up(&adapter->erp_ready_sem); 232 wake_up(&adapter->erp_ready_wq);
231 zfcp_rec_dbf_event_thread("eracte1", adapter); 233 zfcp_dbf_rec_thread("eracte1", adapter->dbf);
232 retval = 0; 234 retval = 0;
233 out: 235 out:
234 zfcp_rec_dbf_event_trigger(id, ref, want, need, act, 236 zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit);
235 adapter, port, unit);
236 return retval; 237 return retval;
237} 238}
238 239
@@ -443,28 +444,28 @@ static int status_change_clear(unsigned long mask, atomic_t *status)
443static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) 444static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
444{ 445{
445 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) 446 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
446 zfcp_rec_dbf_event_adapter("eraubl1", NULL, adapter); 447 zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf);
447 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); 448 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
448} 449}
449 450
450static void zfcp_erp_port_unblock(struct zfcp_port *port) 451static void zfcp_erp_port_unblock(struct zfcp_port *port)
451{ 452{
452 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) 453 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
453 zfcp_rec_dbf_event_port("erpubl1", NULL, port); 454 zfcp_dbf_rec_port("erpubl1", NULL, port);
454 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); 455 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
455} 456}
456 457
457static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) 458static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
458{ 459{
459 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) 460 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
460 zfcp_rec_dbf_event_unit("eruubl1", NULL, unit); 461 zfcp_dbf_rec_unit("eruubl1", NULL, unit);
461 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); 462 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
462} 463}
463 464
464static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) 465static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
465{ 466{
466 list_move(&erp_action->list, &erp_action->adapter->erp_running_head); 467 list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
467 zfcp_rec_dbf_event_action("erator1", erp_action); 468 zfcp_dbf_rec_action("erator1", erp_action);
468} 469}
469 470
470static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) 471static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
@@ -480,13 +481,12 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
480 if (act->status & (ZFCP_STATUS_ERP_DISMISSED | 481 if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
481 ZFCP_STATUS_ERP_TIMEDOUT)) { 482 ZFCP_STATUS_ERP_TIMEDOUT)) {
482 act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 483 act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
483 zfcp_rec_dbf_event_action("erscf_1", act); 484 zfcp_dbf_rec_action("erscf_1", act);
484 act->fsf_req->erp_action = NULL; 485 act->fsf_req->erp_action = NULL;
485 } 486 }
486 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 487 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
487 zfcp_rec_dbf_event_action("erscf_2", act); 488 zfcp_dbf_rec_action("erscf_2", act);
488 if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED | 489 if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
489 ZFCP_STATUS_FSFREQ_DISMISSED))
490 act->fsf_req = NULL; 490 act->fsf_req = NULL;
491 } else 491 } else
492 act->fsf_req = NULL; 492 act->fsf_req = NULL;
@@ -604,9 +604,11 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
604 604
605static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) 605static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
606{ 606{
607 if (zfcp_qdio_open(act->adapter)) 607 struct zfcp_qdio *qdio = act->adapter->qdio;
608
609 if (zfcp_qdio_open(qdio))
608 return ZFCP_ERP_FAILED; 610 return ZFCP_ERP_FAILED;
609 init_waitqueue_head(&act->adapter->request_wq); 611 init_waitqueue_head(&qdio->req_q_wq);
610 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status); 612 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
611 return ZFCP_ERP_SUCCEEDED; 613 return ZFCP_ERP_SUCCEEDED;
612} 614}
@@ -641,9 +643,10 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
641 return ZFCP_ERP_FAILED; 643 return ZFCP_ERP_FAILED;
642 } 644 }
643 645
644 zfcp_rec_dbf_event_thread_lock("erasfx1", adapter); 646 zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf);
645 down(&adapter->erp_ready_sem); 647 wait_event(adapter->erp_ready_wq,
646 zfcp_rec_dbf_event_thread_lock("erasfx2", adapter); 648 !list_empty(&adapter->erp_ready_head));
649 zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf);
647 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) 650 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
648 break; 651 break;
649 652
@@ -682,9 +685,10 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
682 if (ret) 685 if (ret)
683 return ZFCP_ERP_FAILED; 686 return ZFCP_ERP_FAILED;
684 687
685 zfcp_rec_dbf_event_thread_lock("erasox1", adapter); 688 zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf);
686 down(&adapter->erp_ready_sem); 689 wait_event(adapter->erp_ready_wq,
687 zfcp_rec_dbf_event_thread_lock("erasox2", adapter); 690 !list_empty(&adapter->erp_ready_head));
691 zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf);
688 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) 692 if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
689 return ZFCP_ERP_FAILED; 693 return ZFCP_ERP_FAILED;
690 694
@@ -711,10 +715,10 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
711 struct zfcp_adapter *adapter = act->adapter; 715 struct zfcp_adapter *adapter = act->adapter;
712 716
713 /* close queues to ensure that buffers are not accessed by adapter */ 717 /* close queues to ensure that buffers are not accessed by adapter */
714 zfcp_qdio_close(adapter); 718 zfcp_qdio_close(adapter->qdio);
715 zfcp_fsf_req_dismiss_all(adapter); 719 zfcp_fsf_req_dismiss_all(adapter);
716 adapter->fsf_req_seq_no = 0; 720 adapter->fsf_req_seq_no = 0;
717 zfcp_fc_wka_port_force_offline(&adapter->gs->ds); 721 zfcp_fc_wka_ports_force_offline(adapter->gs);
718 /* all ports and units are closed */ 722 /* all ports and units are closed */
719 zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, 723 zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
720 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); 724 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
@@ -841,27 +845,6 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
841 return zfcp_erp_port_strategy_open_port(act); 845 return zfcp_erp_port_strategy_open_port(act);
842} 846}
843 847
844void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
845{
846 int retval;
847 struct zfcp_port *port = container_of(work, struct zfcp_port,
848 gid_pn_work);
849
850 retval = zfcp_fc_ns_gid_pn(&port->erp_action);
851 if (!retval) {
852 port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
853 goto out;
854 }
855 if (retval == -ENOMEM) {
856 zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM);
857 goto out;
858 }
859 /* all other error conditions */
860 zfcp_erp_notify(&port->erp_action, 0);
861out:
862 zfcp_port_put(port);
863}
864
865static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) 848static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
866{ 849{
867 struct zfcp_adapter *adapter = act->adapter; 850 struct zfcp_adapter *adapter = act->adapter;
@@ -876,15 +859,11 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
876 return zfcp_erp_open_ptp_port(act); 859 return zfcp_erp_open_ptp_port(act);
877 if (!port->d_id) { 860 if (!port->d_id) {
878 zfcp_port_get(port); 861 zfcp_port_get(port);
879 if (!queue_work(zfcp_data.work_queue, 862 if (!queue_work(adapter->work_queue,
880 &port->gid_pn_work)) 863 &port->gid_pn_work))
881 zfcp_port_put(port); 864 zfcp_port_put(port);
882 return ZFCP_ERP_CONTINUES; 865 return ZFCP_ERP_EXIT;
883 } 866 }
884 /* fall through */
885 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
886 if (!port->d_id)
887 return ZFCP_ERP_FAILED;
888 return zfcp_erp_port_strategy_open_port(act); 867 return zfcp_erp_port_strategy_open_port(act);
889 868
890 case ZFCP_ERP_STEP_PORT_OPENING: 869 case ZFCP_ERP_STEP_PORT_OPENING:
@@ -1163,7 +1142,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1163 } 1142 }
1164 1143
1165 list_del(&erp_action->list); 1144 list_del(&erp_action->list);
1166 zfcp_rec_dbf_event_action("eractd1", erp_action); 1145 zfcp_dbf_rec_action("eractd1", erp_action);
1167 1146
1168 switch (erp_action->action) { 1147 switch (erp_action->action) {
1169 case ZFCP_ERP_ACTION_REOPEN_UNIT: 1148 case ZFCP_ERP_ACTION_REOPEN_UNIT:
@@ -1311,20 +1290,16 @@ static int zfcp_erp_thread(void *data)
1311 struct list_head *next; 1290 struct list_head *next;
1312 struct zfcp_erp_action *act; 1291 struct zfcp_erp_action *act;
1313 unsigned long flags; 1292 unsigned long flags;
1314 int ignore;
1315
1316 daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
1317 /* Block all signals */
1318 siginitsetinv(&current->blocked, 0);
1319 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1320 wake_up(&adapter->erp_thread_wqh);
1321 1293
1322 while (!(atomic_read(&adapter->status) & 1294 for (;;) {
1323 ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) { 1295 zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf);
1296 wait_event_interruptible(adapter->erp_ready_wq,
1297 !list_empty(&adapter->erp_ready_head) ||
1298 kthread_should_stop());
1299 zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf);
1324 1300
1325 zfcp_rec_dbf_event_thread_lock("erthrd1", adapter); 1301 if (kthread_should_stop())
1326 ignore = down_interruptible(&adapter->erp_ready_sem); 1302 break;
1327 zfcp_rec_dbf_event_thread_lock("erthrd2", adapter);
1328 1303
1329 write_lock_irqsave(&adapter->erp_lock, flags); 1304 write_lock_irqsave(&adapter->erp_lock, flags);
1330 next = adapter->erp_ready_head.next; 1305 next = adapter->erp_ready_head.next;
@@ -1339,9 +1314,6 @@ static int zfcp_erp_thread(void *data)
1339 } 1314 }
1340 } 1315 }
1341 1316
1342 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
1343 wake_up(&adapter->erp_thread_wqh);
1344
1345 return 0; 1317 return 0;
1346} 1318}
1347 1319
@@ -1353,18 +1325,17 @@ static int zfcp_erp_thread(void *data)
1353 */ 1325 */
1354int zfcp_erp_thread_setup(struct zfcp_adapter *adapter) 1326int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
1355{ 1327{
1356 int retval; 1328 struct task_struct *thread;
1357 1329
1358 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); 1330 thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s",
1359 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); 1331 dev_name(&adapter->ccw_device->dev));
1360 if (retval < 0) { 1332 if (IS_ERR(thread)) {
1361 dev_err(&adapter->ccw_device->dev, 1333 dev_err(&adapter->ccw_device->dev,
1362 "Creating an ERP thread for the FCP device failed.\n"); 1334 "Creating an ERP thread for the FCP device failed.\n");
1363 return retval; 1335 return PTR_ERR(thread);
1364 } 1336 }
1365 wait_event(adapter->erp_thread_wqh, 1337
1366 atomic_read(&adapter->status) & 1338 adapter->erp_thread = thread;
1367 ZFCP_STATUS_ADAPTER_ERP_THREAD_UP);
1368 return 0; 1339 return 0;
1369} 1340}
1370 1341
@@ -1379,16 +1350,10 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
1379 */ 1350 */
1380void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) 1351void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1381{ 1352{
1382 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); 1353 kthread_stop(adapter->erp_thread);
1383 up(&adapter->erp_ready_sem); 1354 adapter->erp_thread = NULL;
1384 zfcp_rec_dbf_event_thread_lock("erthrk1", adapter); 1355 WARN_ON(!list_empty(&adapter->erp_ready_head));
1385 1356 WARN_ON(!list_empty(&adapter->erp_running_head));
1386 wait_event(adapter->erp_thread_wqh,
1387 !(atomic_read(&adapter->status) &
1388 ZFCP_STATUS_ADAPTER_ERP_THREAD_UP));
1389
1390 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
1391 &adapter->status);
1392} 1357}
1393 1358
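The two hunks above replace the home-grown thread lifecycle (daemonize(), kernel_thread(), a THREAD_UP/THREAD_KILL status-bit pair, a semaphore and an extra wait queue) with the kthread API. Reduced to a sketch with hypothetical names — this is the general pattern, not the zfcp code itself:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Hypothetical worker context; stands in for struct zfcp_adapter. */
struct worker_ctx {
	struct task_struct *thread;
	wait_queue_head_t ready_wq;	/* init_waitqueue_head() at setup */
	struct list_head ready_head;	/* INIT_LIST_HEAD() at setup */
	spinlock_t lock;
};

static int worker_fn(void *data)
{
	struct worker_ctx *ctx = data;

	for (;;) {
		/* Sleep until there is work or kthread_stop() is called. */
		wait_event_interruptible(ctx->ready_wq,
					 !list_empty(&ctx->ready_head) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;
		/* ... dequeue one entry under ctx->lock and process it ... */
	}
	return 0;
}

static int worker_start(struct worker_ctx *ctx)
{
	struct task_struct *t;

	t = kthread_run(worker_fn, ctx, "worker");
	if (IS_ERR(t))
		return PTR_ERR(t);	/* no thread was created */
	ctx->thread = t;
	return 0;
}

static void worker_stop(struct worker_ctx *ctx)
{
	/* Sets the stop flag, wakes the thread, waits for worker_fn(). */
	kthread_stop(ctx->thread);
	ctx->thread = NULL;
}

Because kthread_stop() both signals the thread and joins it, the THREAD_UP/THREAD_KILL bookkeeping and the erp_thread_wqh wait queue in the deleted code have no equivalent here.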
1394/** 1359/**
@@ -1456,11 +1421,11 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
1456 1421
1457 if (set_or_clear == ZFCP_SET) { 1422 if (set_or_clear == ZFCP_SET) {
1458 if (status_change_set(mask, &adapter->status)) 1423 if (status_change_set(mask, &adapter->status))
1459 zfcp_rec_dbf_event_adapter(id, ref, adapter); 1424 zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
1460 atomic_set_mask(mask, &adapter->status); 1425 atomic_set_mask(mask, &adapter->status);
1461 } else { 1426 } else {
1462 if (status_change_clear(mask, &adapter->status)) 1427 if (status_change_clear(mask, &adapter->status))
1463 zfcp_rec_dbf_event_adapter(id, ref, adapter); 1428 zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
1464 atomic_clear_mask(mask, &adapter->status); 1429 atomic_clear_mask(mask, &adapter->status);
1465 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) 1430 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1466 atomic_set(&adapter->erp_counter, 0); 1431 atomic_set(&adapter->erp_counter, 0);
@@ -1490,11 +1455,11 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
1490 1455
1491 if (set_or_clear == ZFCP_SET) { 1456 if (set_or_clear == ZFCP_SET) {
1492 if (status_change_set(mask, &port->status)) 1457 if (status_change_set(mask, &port->status))
1493 zfcp_rec_dbf_event_port(id, ref, port); 1458 zfcp_dbf_rec_port(id, ref, port);
1494 atomic_set_mask(mask, &port->status); 1459 atomic_set_mask(mask, &port->status);
1495 } else { 1460 } else {
1496 if (status_change_clear(mask, &port->status)) 1461 if (status_change_clear(mask, &port->status))
1497 zfcp_rec_dbf_event_port(id, ref, port); 1462 zfcp_dbf_rec_port(id, ref, port);
1498 atomic_clear_mask(mask, &port->status); 1463 atomic_clear_mask(mask, &port->status);
1499 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) 1464 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
1500 atomic_set(&port->erp_counter, 0); 1465 atomic_set(&port->erp_counter, 0);
@@ -1519,11 +1484,11 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
1519{ 1484{
1520 if (set_or_clear == ZFCP_SET) { 1485 if (set_or_clear == ZFCP_SET) {
1521 if (status_change_set(mask, &unit->status)) 1486 if (status_change_set(mask, &unit->status))
1522 zfcp_rec_dbf_event_unit(id, ref, unit); 1487 zfcp_dbf_rec_unit(id, ref, unit);
1523 atomic_set_mask(mask, &unit->status); 1488 atomic_set_mask(mask, &unit->status);
1524 } else { 1489 } else {
1525 if (status_change_clear(mask, &unit->status)) 1490 if (status_change_clear(mask, &unit->status))
1526 zfcp_rec_dbf_event_unit(id, ref, unit); 1491 zfcp_dbf_rec_unit(id, ref, unit);
1527 atomic_clear_mask(mask, &unit->status); 1492 atomic_clear_mask(mask, &unit->status);
1528 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { 1493 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
1529 atomic_set(&unit->erp_counter, 0); 1494 atomic_set(&unit->erp_counter, 0);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3044c6010306..36935bc0818f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -34,37 +34,31 @@ extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
34extern struct miscdevice zfcp_cfdc_misc; 34extern struct miscdevice zfcp_cfdc_misc;
35 35
36/* zfcp_dbf.c */ 36/* zfcp_dbf.c */
37extern int zfcp_adapter_debug_register(struct zfcp_adapter *); 37extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
38extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *); 38extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *);
39extern void zfcp_rec_dbf_event_thread(char *, struct zfcp_adapter *); 39extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *);
40extern void zfcp_rec_dbf_event_thread_lock(char *, struct zfcp_adapter *); 40extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
41extern void zfcp_rec_dbf_event_adapter(char *, void *, struct zfcp_adapter *); 41extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
42extern void zfcp_rec_dbf_event_port(char *, void *, struct zfcp_port *); 42extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
43extern void zfcp_rec_dbf_event_unit(char *, void *, struct zfcp_unit *); 43extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *);
44extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *, 44extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
45 struct zfcp_adapter *, 45 struct zfcp_adapter *, struct zfcp_port *,
46 struct zfcp_port *, struct zfcp_unit *); 46 struct zfcp_unit *);
47extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *); 47extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
48extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); 48extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
49extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, 49 struct zfcp_dbf *);
50 struct fsf_status_read_buffer *); 50extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
51extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, 51 struct fsf_status_read_buffer *);
52 int); 52extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
53extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *, 53extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
54 struct zfcp_fsf_req *); 54extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *);
55extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); 55extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
56extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); 56extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
57extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); 57extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
58extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); 58extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *);
59extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); 59extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
60extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, 60 struct scsi_cmnd *, struct zfcp_fsf_req *,
61 struct scsi_cmnd *, 61 unsigned long);
62 struct zfcp_fsf_req *);
63extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
64 struct scsi_cmnd *, struct zfcp_fsf_req *,
65 unsigned long);
66extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
67 struct scsi_cmnd *);
68 62
69/* zfcp_erp.c */ 63/* zfcp_erp.c */
70extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, 64extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *,
@@ -96,22 +90,20 @@ extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *);
96extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, 90extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
97 void *); 91 void *);
98extern void zfcp_erp_timeout_handler(unsigned long); 92extern void zfcp_erp_timeout_handler(unsigned long);
99extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
100 93
101/* zfcp_fc.c */ 94/* zfcp_fc.c */
102extern int zfcp_scan_ports(struct zfcp_adapter *); 95extern int zfcp_fc_scan_ports(struct zfcp_adapter *);
103extern void _zfcp_scan_ports_later(struct work_struct *); 96extern void _zfcp_fc_scan_ports_later(struct work_struct *);
104extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); 97extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
105extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *); 98extern void zfcp_fc_port_did_lookup(struct work_struct *);
106extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); 99extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
107extern void zfcp_test_link(struct zfcp_port *); 100extern void zfcp_fc_test_link(struct zfcp_port *);
108extern void zfcp_fc_link_test_work(struct work_struct *); 101extern void zfcp_fc_link_test_work(struct work_struct *);
109extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); 102extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *);
110extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *); 103extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
104extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
111extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); 105extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *);
112extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); 106extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *);
113extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
114
115 107
116/* zfcp_fsf.c */ 108/* zfcp_fsf.c */
117extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 109extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
@@ -122,37 +114,39 @@ extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
122extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); 114extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
123extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); 115extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
124extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); 116extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
125extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *, 117extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
126 struct fsf_qtcb_bottom_config *); 118 struct fsf_qtcb_bottom_config *);
127extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *); 119extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
128extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *, 120extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
129 struct fsf_qtcb_bottom_port *); 121 struct fsf_qtcb_bottom_port *);
130extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *, 122extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
131 struct zfcp_fsf_cfdc *); 123 struct zfcp_fsf_cfdc *);
132extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); 124extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
133extern int zfcp_fsf_status_read(struct zfcp_adapter *); 125extern int zfcp_fsf_status_read(struct zfcp_qdio *);
134extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); 126extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
135extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, 127extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *);
136 struct zfcp_erp_action *);
137extern int zfcp_fsf_send_els(struct zfcp_send_els *); 128extern int zfcp_fsf_send_els(struct zfcp_send_els *);
138extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 129extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
139 struct scsi_cmnd *); 130 struct scsi_cmnd *);
140extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *);
141extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 131extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
142extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); 132extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8);
143extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, 133extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
144 struct zfcp_unit *); 134 struct zfcp_unit *);
135extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
145 136
146/* zfcp_qdio.c */ 137/* zfcp_qdio.c */
147extern int zfcp_qdio_allocate(struct zfcp_adapter *); 138extern int zfcp_qdio_setup(struct zfcp_adapter *);
148extern void zfcp_qdio_free(struct zfcp_adapter *); 139extern void zfcp_qdio_destroy(struct zfcp_qdio *);
149extern int zfcp_qdio_send(struct zfcp_fsf_req *); 140extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *);
150extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *); 141extern struct qdio_buffer_element
151extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *); 142 *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
152extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long, 143extern struct qdio_buffer_element
144 *zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
145extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
146 struct zfcp_queue_req *, unsigned long,
153 struct scatterlist *, int); 147 struct scatterlist *, int);
154extern int zfcp_qdio_open(struct zfcp_adapter *); 148extern int zfcp_qdio_open(struct zfcp_qdio *);
155extern void zfcp_qdio_close(struct zfcp_adapter *); 149extern void zfcp_qdio_close(struct zfcp_qdio *);
156 150
157/* zfcp_scsi.c */ 151/* zfcp_scsi.c */
158extern struct zfcp_data zfcp_data; 152extern struct zfcp_data zfcp_data;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 47daebfa7e59..722f22de8753 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -25,14 +25,6 @@ static u32 rscn_range_mask[] = {
25 [RSCN_FABRIC_ADDRESS] = 0x000000, 25 [RSCN_FABRIC_ADDRESS] = 0x000000,
26}; 26};
27 27
28struct ct_iu_gpn_ft_req {
29 struct ct_hdr header;
30 u8 flags;
31 u8 domain_id_scope;
32 u8 area_id_scope;
33 u8 fc4_type;
34} __attribute__ ((packed));
35
36struct gpn_ft_resp_acc { 28struct gpn_ft_resp_acc {
37 u8 control; 29 u8 control;
38 u8 port_id[3]; 30 u8 port_id[3];
@@ -65,7 +57,7 @@ struct zfcp_fc_ns_handler_data {
65 unsigned long handler_data; 57 unsigned long handler_data;
66}; 58};
67 59
68static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) 60static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
69{ 61{
70 if (mutex_lock_interruptible(&wka_port->mutex)) 62 if (mutex_lock_interruptible(&wka_port->mutex))
71 return -ERESTARTSYS; 63 return -ERESTARTSYS;
@@ -90,7 +82,7 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
90 return -EIO; 82 return -EIO;
91} 83}
92 84
93static void zfcp_wka_port_offline(struct work_struct *work) 85static void zfcp_fc_wka_port_offline(struct work_struct *work)
94{ 86{
95 struct delayed_work *dw = to_delayed_work(work); 87 struct delayed_work *dw = to_delayed_work(work);
96 struct zfcp_wka_port *wka_port = 88 struct zfcp_wka_port *wka_port =
@@ -110,7 +102,7 @@ out:
110 mutex_unlock(&wka_port->mutex); 102 mutex_unlock(&wka_port->mutex);
111} 103}
112 104
113static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port) 105static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port)
114{ 106{
115 if (atomic_dec_return(&wka_port->refcount) != 0) 107 if (atomic_dec_return(&wka_port->refcount) != 0)
116 return; 108 return;
@@ -129,10 +121,10 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
129 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 121 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
130 atomic_set(&wka_port->refcount, 0); 122 atomic_set(&wka_port->refcount, 0);
131 mutex_init(&wka_port->mutex); 123 mutex_init(&wka_port->mutex);
132 INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline); 124 INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
133} 125}
134 126
135void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) 127static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
136{ 128{
137 cancel_delayed_work_sync(&wka->work); 129 cancel_delayed_work_sync(&wka->work);
138 mutex_lock(&wka->mutex); 130 mutex_lock(&wka->mutex);
@@ -140,15 +132,13 @@ void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
140 mutex_unlock(&wka->mutex); 132 mutex_unlock(&wka->mutex);
141} 133}
142 134
143void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter) 135void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs)
144{ 136{
145 struct zfcp_wka_ports *gs = adapter->gs; 137 zfcp_fc_wka_port_force_offline(&gs->ms);
146 138 zfcp_fc_wka_port_force_offline(&gs->ts);
147 zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter); 139 zfcp_fc_wka_port_force_offline(&gs->ds);
148 zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter); 140 zfcp_fc_wka_port_force_offline(&gs->as);
149 zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter); 141 zfcp_fc_wka_port_force_offline(&gs->ks);
150 zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter);
151 zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter);
152} 142}
153 143
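zfcp_fc_wka_port_put() and zfcp_fc_wka_port_offline() together implement a refcounted object that drifts offline some time after its last user lets go, while force-offline must synchronously cancel that timer first. A minimal sketch of the same shape, with hypothetical names and an arbitrary one-second delay:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Hypothetical port that goes offline a while after its last user;
 * INIT_DELAYED_WORK(&p->work, lazy_port_offline) runs at setup. */
struct lazy_port {
	atomic_t refcount;
	struct mutex mutex;
	struct delayed_work work;
};

static void lazy_port_offline(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct lazy_port *p = container_of(dw, struct lazy_port, work);

	mutex_lock(&p->mutex);
	if (atomic_read(&p->refcount) == 0) {
		/* ... close the port ... */
	}
	mutex_unlock(&p->mutex);
}

static void lazy_port_put(struct lazy_port *p)
{
	if (atomic_dec_return(&p->refcount) != 0)
		return;
	/* Last user gone: close lazily so a quick re-get stays cheap. */
	schedule_delayed_work(&p->work, HZ);
}

static void lazy_port_force_offline(struct lazy_port *p)
{
	/* No offline worker may run concurrently with the forced close. */
	cancel_delayed_work_sync(&p->work);
	mutex_lock(&p->mutex);
	/* ... mark offline unconditionally ... */
	mutex_unlock(&p->mutex);
}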
154static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, 144static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
@@ -160,7 +150,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
160 read_lock_irqsave(&zfcp_data.config_lock, flags); 150 read_lock_irqsave(&zfcp_data.config_lock, flags);
161 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { 151 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
162 if ((port->d_id & range) == (elem->nport_did & range)) 152 if ((port->d_id & range) == (elem->nport_did & range))
163 zfcp_test_link(port); 153 zfcp_fc_test_link(port);
164 if (!port->d_id) 154 if (!port->d_id)
165 zfcp_erp_port_reopen(port, 155 zfcp_erp_port_reopen(port,
166 ZFCP_STATUS_COMMON_ERP_FAILED, 156 ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -241,7 +231,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
241 (struct fsf_status_read_buffer *) fsf_req->data; 231 (struct fsf_status_read_buffer *) fsf_req->data;
242 unsigned int els_type = status_buffer->payload.data[0]; 232 unsigned int els_type = status_buffer->payload.data[0];
243 233
244 zfcp_san_dbf_event_incoming_els(fsf_req); 234 zfcp_dbf_san_incoming_els(fsf_req);
245 if (els_type == LS_PLOGI) 235 if (els_type == LS_PLOGI)
246 zfcp_fc_incoming_plogi(fsf_req); 236 zfcp_fc_incoming_plogi(fsf_req);
247 else if (els_type == LS_LOGO) 237 else if (els_type == LS_LOGO)
@@ -281,19 +271,18 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
281 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; 271 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
282} 272}
283 273
284int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, 274static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
285 struct zfcp_gid_pn_data *gid_pn) 275 struct zfcp_gid_pn_data *gid_pn)
286{ 276{
287 struct zfcp_adapter *adapter = erp_action->adapter; 277 struct zfcp_adapter *adapter = port->adapter;
288 struct zfcp_fc_ns_handler_data compl_rec; 278 struct zfcp_fc_ns_handler_data compl_rec;
289 int ret; 279 int ret;
290 280
291 /* set up parameters for the send generic command */ 281
292 gid_pn->port = erp_action->port; 282 gid_pn->port = port;
293 gid_pn->ct.wka_port = &adapter->gs->ds; 283 gid_pn->ct.wka_port = &adapter->gs->ds;
294 gid_pn->ct.handler = zfcp_fc_ns_handler; 284 gid_pn->ct.handler = zfcp_fc_ns_handler;
295 gid_pn->ct.handler_data = (unsigned long) &compl_rec; 285 gid_pn->ct.handler_data = (unsigned long) &compl_rec;
296 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
297 gid_pn->ct.req = &gid_pn->req; 286 gid_pn->ct.req = &gid_pn->req;
298 gid_pn->ct.resp = &gid_pn->resp; 287 gid_pn->ct.resp = &gid_pn->resp;
299 sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, 288 sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
@@ -308,13 +297,12 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
308 gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; 297 gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
309 gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; 298 gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
310 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4; 299 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4;
311 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn; 300 gid_pn->ct_iu_req.wwpn = port->wwpn;
312 301
313 init_completion(&compl_rec.done); 302 init_completion(&compl_rec.done);
314 compl_rec.handler = zfcp_fc_ns_gid_pn_eval; 303 compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
315 compl_rec.handler_data = (unsigned long) gid_pn; 304 compl_rec.handler_data = (unsigned long) gid_pn;
316 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, 305 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req);
317 erp_action);
318 if (!ret) 306 if (!ret)
319 wait_for_completion(&compl_rec.done); 307 wait_for_completion(&compl_rec.done);
320 return ret; 308 return ret;
@@ -322,33 +310,56 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
322 310
323/** 311/**
324 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request 312
325 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed 313 * @port: port where GID_PN request is needed
326 * return: -ENOMEM on error, 0 otherwise 314 * return: -ENOMEM on error, 0 otherwise
327 */ 315 */
328int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) 316static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
329{ 317{
330 int ret; 318 int ret;
331 struct zfcp_gid_pn_data *gid_pn; 319 struct zfcp_gid_pn_data *gid_pn;
332 struct zfcp_adapter *adapter = erp_action->adapter; 320 struct zfcp_adapter *adapter = port->adapter;
333 321
334 gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC); 322 gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC);
335 if (!gid_pn) 323 if (!gid_pn)
336 return -ENOMEM; 324 return -ENOMEM;
337 325
338 memset(gid_pn, 0, sizeof(*gid_pn)); 326 memset(gid_pn, 0, sizeof(*gid_pn));
339 327
340 ret = zfcp_wka_port_get(&adapter->gs->ds); 328 ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
341 if (ret) 329 if (ret)
342 goto out; 330 goto out;
343 331
344 ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn); 332 ret = zfcp_fc_ns_gid_pn_request(port, gid_pn);
345 333
346 zfcp_wka_port_put(&adapter->gs->ds); 334 zfcp_fc_wka_port_put(&adapter->gs->ds);
347out: 335out:
348 mempool_free(gid_pn, adapter->pool.data_gid_pn); 336 mempool_free(gid_pn, adapter->pool.gid_pn_data);
349 return ret; 337 return ret;
350} 338}
351 339
340void zfcp_fc_port_did_lookup(struct work_struct *work)
341{
342 int ret;
343 struct zfcp_port *port = container_of(work, struct zfcp_port,
344 gid_pn_work);
345
346 ret = zfcp_fc_ns_gid_pn(port);
347 if (ret) {
348 /* could not issue gid_pn for some reason */
349 zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL);
350 goto out;
351 }
352
353 if (!port->d_id) {
354 zfcp_erp_port_failed(port, "fcgpn_2", NULL);
355 goto out;
356 }
357
358 zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
359out:
360 zfcp_port_put(port);
361}
362
352/** 363/**
353 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload 364
354 * @port: zfcp_port structure 365 * @port: zfcp_port structure
@@ -404,6 +415,7 @@ static void zfcp_fc_adisc_handler(unsigned long data)
404 /* port is good, unblock rport without going through erp */ 415 /* port is good, unblock rport without going through erp */
405 zfcp_scsi_schedule_rport_register(port); 416 zfcp_scsi_schedule_rport_register(port);
406 out: 417 out:
418 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
407 zfcp_port_put(port); 419 zfcp_port_put(port);
408 kfree(adisc); 420 kfree(adisc);
409} 421}
@@ -450,28 +462,36 @@ void zfcp_fc_link_test_work(struct work_struct *work)
450 port->rport_task = RPORT_DEL; 462 port->rport_task = RPORT_DEL;
451 zfcp_scsi_rport_work(&port->rport_work); 463 zfcp_scsi_rport_work(&port->rport_work);
452 464
465 /* only issue one test command at a time per port */
466 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
467 goto out;
468
469 atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
470
453 retval = zfcp_fc_adisc(port); 471 retval = zfcp_fc_adisc(port);
454 if (retval == 0) 472 if (retval == 0)
455 return; 473 return;
456 474
457 /* send of ADISC was not possible */ 475 /* send of ADISC was not possible */
476 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
458 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); 477 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
459 478
479out:
460 zfcp_port_put(port); 480 zfcp_port_put(port);
461} 481}
462 482
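The new ZFCP_STATUS_PORT_LINK_TEST guard uses the s390-specific atomic_set_mask()/atomic_clear_mask() helpers on the port status word, with a separate atomic_read() check beforehand. On the generic bitops API the same "one test command in flight" guard can be done in a single atomic step; a sketch with hypothetical names:

#include <linux/bitops.h>
#include <linux/types.h>

#define OBJ_TEST_RUNNING 0	/* bit number within obj->flags */

struct probed_obj {
	unsigned long flags;
};

/* True if this caller won the right to issue the single test command. */
static bool probed_obj_try_start_test(struct probed_obj *obj)
{
	/* Atomic: exactly one concurrent caller observes the 0 -> 1 edge. */
	return !test_and_set_bit(OBJ_TEST_RUNNING, &obj->flags);
}

/* Called when the test completes, or could not be sent at all. */
static void probed_obj_test_done(struct probed_obj *obj)
{
	clear_bit(OBJ_TEST_RUNNING, &obj->flags);
}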
463/** 483/**
464 * zfcp_test_link - lightweight link test procedure 484 * zfcp_fc_test_link - lightweight link test procedure
465 * @port: port to be tested 485 * @port: port to be tested
466 * 486 *
467 * Test status of a link to a remote port using the ELS command ADISC. 487 * Test status of a link to a remote port using the ELS command ADISC.
468 * If there is a problem with the remote port, error recovery steps 488 * If there is a problem with the remote port, error recovery steps
469 * will be triggered. 489 * will be triggered.
470 */ 490 */
471void zfcp_test_link(struct zfcp_port *port) 491void zfcp_fc_test_link(struct zfcp_port *port)
472{ 492{
473 zfcp_port_get(port); 493 zfcp_port_get(port);
474 if (!queue_work(zfcp_data.work_queue, &port->test_link_work)) 494 if (!queue_work(port->adapter->work_queue, &port->test_link_work))
475 zfcp_port_put(port); 495 zfcp_port_put(port);
476} 496}
477 497
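zfcp_fc_test_link() shows the submit side of a common pattern: take a reference before queueing the work and drop it again if the work was already pending, while the work function (zfcp_fc_link_test_work() here, zfcp_fc_port_did_lookup() earlier) drops the reference on every exit path. A sketch using kref, all names hypothetical:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical object handled asynchronously; stands in for zfcp_port.
 * kref_init(&obj->ref) and INIT_WORK(&obj->work, async_obj_work) at setup. */
struct async_obj {
	struct kref ref;
	struct work_struct work;
};

static void async_obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct async_obj, ref));
}

static void async_obj_work(struct work_struct *work)
{
	struct async_obj *obj = container_of(work, struct async_obj, work);

	/* ... perform the lookup or link test ... */

	/* Drop the reference the submitter took on our behalf. */
	kref_put(&obj->ref, async_obj_release);
}

static void async_obj_kick(struct async_obj *obj)
{
	kref_get(&obj->ref);	/* pin the object for the work item */
	if (!schedule_work(&obj->work))
		/* Already pending: that instance owns its own reference. */
		kref_put(&obj->ref, async_obj_release);
}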
@@ -479,7 +499,7 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
479{ 499{
480 struct scatterlist *sg = &gpn_ft->sg_req; 500 struct scatterlist *sg = &gpn_ft->sg_req;
481 501
482 kfree(sg_virt(sg)); /* free request buffer */ 502 kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
483 zfcp_sg_free_table(gpn_ft->sg_resp, buf_num); 503 zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
484 504
485 kfree(gpn_ft); 505 kfree(gpn_ft);
@@ -494,7 +514,7 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num)
494 if (!gpn_ft) 514 if (!gpn_ft)
495 return NULL; 515 return NULL;
496 516
497 req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL); 517 req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
498 if (!req) { 518 if (!req) {
499 kfree(gpn_ft); 519 kfree(gpn_ft);
500 gpn_ft = NULL; 520 gpn_ft = NULL;
@@ -511,9 +531,8 @@ out:
511} 531}
512 532
513 533
514static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, 534static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
515 struct zfcp_adapter *adapter, 535 struct zfcp_adapter *adapter, int max_bytes)
516 int max_bytes)
517{ 536{
518 struct zfcp_send_ct *ct = &gpn_ft->ct; 537 struct zfcp_send_ct *ct = &gpn_ft->ct;
519 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); 538 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
@@ -536,19 +555,18 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
536 ct->wka_port = &adapter->gs->ds; 555 ct->wka_port = &adapter->gs->ds;
537 ct->handler = zfcp_fc_ns_handler; 556 ct->handler = zfcp_fc_ns_handler;
538 ct->handler_data = (unsigned long)&compl_rec; 557 ct->handler_data = (unsigned long)&compl_rec;
539 ct->timeout = 10;
540 ct->req = &gpn_ft->sg_req; 558 ct->req = &gpn_ft->sg_req;
541 ct->resp = gpn_ft->sg_resp; 559 ct->resp = gpn_ft->sg_resp;
542 560
543 init_completion(&compl_rec.done); 561 init_completion(&compl_rec.done);
544 compl_rec.handler = NULL; 562 compl_rec.handler = NULL;
545 ret = zfcp_fsf_send_ct(ct, NULL, NULL); 563 ret = zfcp_fsf_send_ct(ct, NULL);
546 if (!ret) 564 if (!ret)
547 wait_for_completion(&compl_rec.done); 565 wait_for_completion(&compl_rec.done);
548 return ret; 566 return ret;
549} 567}
550 568
551static void zfcp_validate_port(struct zfcp_port *port) 569static void zfcp_fc_validate_port(struct zfcp_port *port)
552{ 570{
553 struct zfcp_adapter *adapter = port->adapter; 571 struct zfcp_adapter *adapter = port->adapter;
554 572
@@ -568,7 +586,7 @@ static void zfcp_validate_port(struct zfcp_port *port)
568 zfcp_port_dequeue(port); 586 zfcp_port_dequeue(port);
569} 587}
570 588
571static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) 589static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
572{ 590{
573 struct zfcp_send_ct *ct = &gpn_ft->ct; 591 struct zfcp_send_ct *ct = &gpn_ft->ct;
574 struct scatterlist *sg = gpn_ft->sg_resp; 592 struct scatterlist *sg = gpn_ft->sg_resp;
@@ -595,7 +613,7 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
595 return -E2BIG; 613 return -E2BIG;
596 } 614 }
597 615
598 down(&zfcp_data.config_sema); 616 mutex_lock(&zfcp_data.config_mutex);
599 617
600 /* first entry is the header */ 618 /* first entry is the header */
601 for (x = 1; x < max_entries && !last; x++) { 619 for (x = 1; x < max_entries && !last; x++) {
@@ -628,16 +646,16 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
628 646
629 zfcp_erp_wait(adapter); 647 zfcp_erp_wait(adapter);
630 list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) 648 list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list)
631 zfcp_validate_port(port); 649 zfcp_fc_validate_port(port);
632 up(&zfcp_data.config_sema); 650 mutex_unlock(&zfcp_data.config_mutex);
633 return ret; 651 return ret;
634} 652}
635 653
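The down(&zfcp_data.config_sema)/up() pairs become mutex_lock()/mutex_unlock() here and below: a semaphore used purely for mutual exclusion should be a mutex, which gains owner tracking and lockdep coverage. Minimal shape, with a hypothetical lock name:

#include <linux/mutex.h>

static DEFINE_MUTEX(config_mutex);	/* replaces a counting semaphore */

static void config_update(void)
{
	mutex_lock(&config_mutex);
	/* ... walk and modify the configuration lists ... */
	mutex_unlock(&config_mutex);
}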
636/** 654/**
637 * zfcp_scan_ports - scan remote ports and attach new ports 655 * zfcp_fc_scan_ports - scan remote ports and attach new ports
638 * @adapter: pointer to struct zfcp_adapter 656 * @adapter: pointer to struct zfcp_adapter
639 */ 657 */
640int zfcp_scan_ports(struct zfcp_adapter *adapter) 658int zfcp_fc_scan_ports(struct zfcp_adapter *adapter)
641{ 659{
642 int ret, i; 660 int ret, i;
643 struct zfcp_gpn_ft *gpn_ft; 661 struct zfcp_gpn_ft *gpn_ft;
@@ -652,7 +670,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
652 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) 670 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
653 return 0; 671 return 0;
654 672
655 ret = zfcp_wka_port_get(&adapter->gs->ds); 673 ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
656 if (ret) 674 if (ret)
657 return ret; 675 return ret;
658 676
@@ -663,9 +681,9 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
663 } 681 }
664 682
665 for (i = 0; i < 3; i++) { 683 for (i = 0; i < 3; i++) {
666 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes); 684 ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
667 if (!ret) { 685 if (!ret) {
668 ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries); 686 ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries);
669 if (ret == -EAGAIN) 687 if (ret == -EAGAIN)
670 ssleep(1); 688 ssleep(1);
671 else 689 else
@@ -674,14 +692,14 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
674 } 692 }
675 zfcp_free_sg_env(gpn_ft, buf_num); 693 zfcp_free_sg_env(gpn_ft, buf_num);
676out: 694out:
677 zfcp_wka_port_put(&adapter->gs->ds); 695 zfcp_fc_wka_port_put(&adapter->gs->ds);
678 return ret; 696 return ret;
679} 697}
680 698
681 699
682void _zfcp_scan_ports_later(struct work_struct *work) 700void _zfcp_fc_scan_ports_later(struct work_struct *work)
683{ 701{
684 zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); 702 zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
685} 703}
686 704
687struct zfcp_els_fc_job { 705struct zfcp_els_fc_job {
@@ -732,7 +750,7 @@ int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job)
732 els_fc_job->els.adapter = adapter; 750 els_fc_job->els.adapter = adapter;
733 if (rport) { 751 if (rport) {
734 read_lock_irq(&zfcp_data.config_lock); 752 read_lock_irq(&zfcp_data.config_lock);
735 port = rport->dd_data; 753 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
736 if (port) 754 if (port)
737 els_fc_job->els.d_id = port->d_id; 755 els_fc_job->els.d_id = port->d_id;
738 read_unlock_irq(&zfcp_data.config_lock); 756 read_unlock_irq(&zfcp_data.config_lock);
@@ -771,7 +789,7 @@ static void zfcp_fc_generic_ct_handler(unsigned long data)
771 job->state_flags = FC_RQST_STATE_DONE; 789 job->state_flags = FC_RQST_STATE_DONE;
772 job->job_done(job); 790 job->job_done(job);
773 791
774 zfcp_wka_port_put(ct_fc_job->ct.wka_port); 792 zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
775 793
776 kfree(ct_fc_job); 794 kfree(ct_fc_job);
777} 795}
@@ -817,7 +835,7 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job)
817 return -EINVAL; /* no such service */ 835 return -EINVAL; /* no such service */
818 } 836 }
819 837
820 ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port); 838 ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port);
821 if (ret) { 839 if (ret) {
822 kfree(ct_fc_job); 840 kfree(ct_fc_job);
823 return ret; 841 return ret;
@@ -825,16 +843,40 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job)
825 843
826 ct_fc_job->ct.req = job->request_payload.sg_list; 844 ct_fc_job->ct.req = job->request_payload.sg_list;
827 ct_fc_job->ct.resp = job->reply_payload.sg_list; 845 ct_fc_job->ct.resp = job->reply_payload.sg_list;
828 ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT;
829 ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; 846 ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler;
830 ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; 847 ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job;
831 ct_fc_job->ct.completion = NULL; 848 ct_fc_job->ct.completion = NULL;
832 ct_fc_job->job = job; 849 ct_fc_job->job = job;
833 850
834 ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL); 851 ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL);
835 if (ret) { 852 if (ret) {
836 kfree(ct_fc_job); 853 kfree(ct_fc_job);
837 zfcp_wka_port_put(ct_fc_job->ct.wka_port); 854 zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
838 } 855 }
839 return ret; 856 return ret;
840} 857}
858
859int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
860{
861 struct zfcp_wka_ports *wka_ports;
862
863 wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
864 if (!wka_ports)
865 return -ENOMEM;
866
867 adapter->gs = wka_ports;
868 zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
869 zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
870 zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
871 zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
872 zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter);
873
874 return 0;
875}
876
877void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
878{
879 kfree(adapter->gs);
880 adapter->gs = NULL;
881}
882
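zfcp_fc_gs_setup()/zfcp_fc_gs_destroy() turn the WKA-port table into a dynamically allocated object with a paired setup/teardown, replacing the old one-shot zfcp_fc_wka_ports_init(). The shape, reduced to a sketch with hypothetical names:

#include <linux/slab.h>

/* Hypothetical container of well-known service ports. */
struct services {
	int ms, ts, ds, as, ks;	/* placeholders for the five WKA ports */
};

struct host {
	struct services *gs;
};

static int host_services_setup(struct host *h)
{
	h->gs = kzalloc(sizeof(*h->gs), GFP_KERNEL);
	if (!h->gs)
		return -ENOMEM;
	/* ... initialize each member port here ... */
	return 0;
}

static void host_services_destroy(struct host *h)
{
	kfree(h->gs);
	h->gs = NULL;	/* no dangling pointer on later teardown paths */
}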
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 47795fbf081f..f09c863dc6bd 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -11,9 +11,7 @@
11 11
12#include <linux/blktrace_api.h> 12#include <linux/blktrace_api.h>
13#include "zfcp_ext.h" 13#include "zfcp_ext.h"
14 14#include "zfcp_dbf.h"
15#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
16#define ZFCP_REQ_NO_QTCB 0x00000008
17 15
18static void zfcp_fsf_request_timeout_handler(unsigned long data) 16static void zfcp_fsf_request_timeout_handler(unsigned long data)
19{ 17{
@@ -111,43 +109,15 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
111void zfcp_fsf_req_free(struct zfcp_fsf_req *req) 109void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
112{ 110{
113 if (likely(req->pool)) { 111 if (likely(req->pool)) {
112 if (likely(req->qtcb))
113 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
114 mempool_free(req, req->pool); 114 mempool_free(req, req->pool);
115 return; 115 return;
116 } 116 }
117 117
118 if (req->qtcb) { 118 if (likely(req->qtcb))
119 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req); 119 kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
120 return; 120 kfree(req);
121 }
122}
123
124/**
125 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
126 * @adapter: pointer to struct zfcp_adapter
127 *
128 * Never ever call this without shutting down the adapter first.
129 * Otherwise the adapter would continue using and corrupting s390 storage.
130 * Included BUG_ON() call to ensure this is done.
131 * ERP is supposed to be the only user of this function.
132 */
133void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
134{
135 struct zfcp_fsf_req *req, *tmp;
136 unsigned long flags;
137 LIST_HEAD(remove_queue);
138 unsigned int i;
139
140 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
141 spin_lock_irqsave(&adapter->req_list_lock, flags);
142 for (i = 0; i < REQUEST_LIST_SIZE; i++)
143 list_splice_init(&adapter->req_list[i], &remove_queue);
144 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
145
146 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
147 list_del(&req->list);
148 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
149 zfcp_fsf_req_complete(req);
150 }
151} 121}
152 122
153static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) 123static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
@@ -278,13 +248,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
278 struct fsf_status_read_buffer *sr_buf = req->data; 248 struct fsf_status_read_buffer *sr_buf = req->data;
279 249
280 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 250 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
281 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf); 251 zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
282 mempool_free(sr_buf, adapter->pool.data_status_read); 252 mempool_free(sr_buf, adapter->pool.status_read_data);
283 zfcp_fsf_req_free(req); 253 zfcp_fsf_req_free(req);
284 return; 254 return;
285 } 255 }
286 256
287 zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf); 257 zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);
288 258
289 switch (sr_buf->status_type) { 259 switch (sr_buf->status_type) {
290 case FSF_STATUS_READ_PORT_CLOSED: 260 case FSF_STATUS_READ_PORT_CLOSED:
@@ -299,7 +269,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
299 dev_warn(&adapter->ccw_device->dev, 269 dev_warn(&adapter->ccw_device->dev,
300 "The error threshold for checksum statistics " 270 "The error threshold for checksum statistics "
301 "has been exceeded\n"); 271 "has been exceeded\n");
302 zfcp_hba_dbf_event_berr(adapter, req); 272 zfcp_dbf_hba_berr(adapter->dbf, req);
303 break; 273 break;
304 case FSF_STATUS_READ_LINK_DOWN: 274 case FSF_STATUS_READ_LINK_DOWN:
305 zfcp_fsf_status_read_link_down(req); 275 zfcp_fsf_status_read_link_down(req);
@@ -331,11 +301,11 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
331 break; 301 break;
332 } 302 }
333 303
334 mempool_free(sr_buf, adapter->pool.data_status_read); 304 mempool_free(sr_buf, adapter->pool.status_read_data);
335 zfcp_fsf_req_free(req); 305 zfcp_fsf_req_free(req);
336 306
337 atomic_inc(&adapter->stat_miss); 307 atomic_inc(&adapter->stat_miss);
338 queue_work(zfcp_data.work_queue, &adapter->stat_work); 308 queue_work(adapter->work_queue, &adapter->stat_work);
339} 309}
340 310
341static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) 311static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
@@ -385,7 +355,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
385 struct fsf_qtcb *qtcb = req->qtcb; 355 struct fsf_qtcb *qtcb = req->qtcb;
386 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; 356 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
387 357
388 zfcp_hba_dbf_event_fsf_response(req); 358 zfcp_dbf_hba_fsf_response(req);
389 359
390 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 360 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
391 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 361 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
@@ -458,7 +428,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
458 * is called to process the completion status and trigger further 428 * is called to process the completion status and trigger further
459 * events related to the FSF request. 429 * events related to the FSF request.
460 */ 430 */
461void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) 431static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
462{ 432{
463 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { 433 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
464 zfcp_fsf_status_read_handler(req); 434 zfcp_fsf_status_read_handler(req);
@@ -472,23 +442,40 @@ void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
472 442
473 if (req->erp_action) 443 if (req->erp_action)
474 zfcp_erp_notify(req->erp_action, 0); 444 zfcp_erp_notify(req->erp_action, 0);
475 req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
476 445
477 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) 446 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
478 zfcp_fsf_req_free(req); 447 zfcp_fsf_req_free(req);
479 else 448 else
480 /* notify initiator waiting for the requests completion */ 449 complete(&req->completion);
481 /* 450}
482 * FIXME: Race! We must not access fsf_req here as it might have been 451
483 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED 452/**
484 * flag. It's an improbable case. But, we have the same paranoia for 453 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
485 * the cleanup flag already. 454 * @adapter: pointer to struct zfcp_adapter
486 * Might better be handled using complete()? 455 *
487 * (setting the flag and doing wakeup ought to be atomic 456 * Never ever call this without shutting down the adapter first.
488 * with regard to checking the flag as long as waitqueue is 457 * Otherwise the adapter would continue using and corrupting s390 storage.
489 * part of the to be released structure) 458 * Included BUG_ON() call to ensure this is done.
490 */ 459 * ERP is supposed to be the only user of this function.
491 wake_up(&req->completion_wq); 460 */
461void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
462{
463 struct zfcp_fsf_req *req, *tmp;
464 unsigned long flags;
465 LIST_HEAD(remove_queue);
466 unsigned int i;
467
468 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
469 spin_lock_irqsave(&adapter->req_list_lock, flags);
470 for (i = 0; i < REQUEST_LIST_SIZE; i++)
471 list_splice_init(&adapter->req_list[i], &remove_queue);
472 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
473
474 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
475 list_del(&req->list);
476 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
477 zfcp_fsf_req_complete(req);
478 }
492} 479}
493 480
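This rewrite removes the FIXME for good reason: with a wait queue embedded in the request, the waker could still be touching the request after the waiter had observed the COMPLETED flag and freed it. struct completion is specified for exactly this hand-off — the waiter may free the object as soon as wait_for_completion() returns, because complete() only manipulates it under the completion's internal lock. A sketch with hypothetical names:

#include <linux/completion.h>
#include <linux/slab.h>

/* Hypothetical request completed from the response/interrupt path. */
struct my_req {
	struct completion done;
	int result;
};

static struct my_req *my_req_alloc(void)
{
	struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (req)
		init_completion(&req->done);
	return req;
}

/* Response path: publish the result and wake the waiter. */
static void my_req_complete(struct my_req *req, int result)
{
	req->result = result;
	complete(&req->done);	/* last access; waiter may free req now */
}

/* Submitter: wait for the response, then reclaim the request. */
static int my_req_wait(struct my_req *req)
{
	int ret;

	wait_for_completion(&req->done);
	ret = req->result;
	kfree(req);
	return ret;
}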
494static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) 481static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
@@ -650,79 +637,77 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
650 } 637 }
651} 638}
652 639
653static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter) 640static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
654{ 641{
655 struct zfcp_qdio_queue *req_q = &adapter->req_q; 642 struct zfcp_qdio_queue *req_q = &qdio->req_q;
656 643
657 spin_lock_bh(&adapter->req_q_lock); 644 spin_lock_bh(&qdio->req_q_lock);
658 if (atomic_read(&req_q->count)) 645 if (atomic_read(&req_q->count))
659 return 1; 646 return 1;
660 spin_unlock_bh(&adapter->req_q_lock); 647 spin_unlock_bh(&qdio->req_q_lock);
661 return 0; 648 return 0;
662} 649}
663 650
664static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) 651static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
665{ 652{
653 struct zfcp_adapter *adapter = qdio->adapter;
666 long ret; 654 long ret;
667 655
668 spin_unlock_bh(&adapter->req_q_lock); 656 spin_unlock_bh(&qdio->req_q_lock);
669 ret = wait_event_interruptible_timeout(adapter->request_wq, 657 ret = wait_event_interruptible_timeout(qdio->req_q_wq,
670 zfcp_fsf_sbal_check(adapter), 5 * HZ); 658 zfcp_fsf_sbal_check(qdio), 5 * HZ);
671 if (ret > 0) 659 if (ret > 0)
672 return 0; 660 return 0;
673 if (!ret) { 661 if (!ret) {
674 atomic_inc(&adapter->qdio_outb_full); 662 atomic_inc(&qdio->req_q_full);
675 /* assume hanging outbound queue, try queue recovery */ 663 /* assume hanging outbound queue, try queue recovery */
676 zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL); 664 zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
677 } 665 }
678 666
679 spin_lock_bh(&adapter->req_q_lock); 667 spin_lock_bh(&qdio->req_q_lock);
680 return -EIO; 668 return -EIO;
681} 669}
682 670
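zfcp_fsf_req_sbal_get() is a lock-juggling variant of "wait for queue space, or kick recovery on timeout": the predicate (zfcp_fsf_sbal_check()) returns with the request-queue lock held on success, so the caller resumes either holding the lock with a free entry, or with -EIO after requesting adapter recovery. The same shape in isolation, hypothetical names:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Hypothetical transmit queue guarded by a bottom-half lock. */
struct tx_queue {
	spinlock_t lock;
	atomic_t free;			/* free slots */
	wait_queue_head_t wq;		/* woken when slots are released */
};

/* Predicate: on success return 1 *with q->lock held* for the caller. */
static int tx_queue_check(struct tx_queue *q)
{
	spin_lock_bh(&q->lock);
	if (atomic_read(&q->free))
		return 1;
	spin_unlock_bh(&q->lock);
	return 0;
}

/* Called with q->lock held; returns 0 with the lock held, else -EIO. */
static int tx_queue_wait_for_space(struct tx_queue *q)
{
	long ret;

	spin_unlock_bh(&q->lock);
	ret = wait_event_interruptible_timeout(q->wq, tx_queue_check(q),
					       5 * HZ);
	if (ret > 0)
		return 0;	/* tx_queue_check() left the lock held */
	if (ret == 0) {
		/* Timed out: assume a hung queue, trigger recovery here. */
	}
	spin_lock_bh(&q->lock);
	return -EIO;
}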
683static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool) 671static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
684{ 672{
685 struct zfcp_fsf_req *req; 673 struct zfcp_fsf_req *req;
686 req = mempool_alloc(pool, GFP_ATOMIC); 674
687 if (!req) 675 if (likely(pool))
676 req = mempool_alloc(pool, GFP_ATOMIC);
677 else
678 req = kmalloc(sizeof(*req), GFP_ATOMIC);
679
680 if (unlikely(!req))
688 return NULL; 681 return NULL;
682
689 memset(req, 0, sizeof(*req)); 683 memset(req, 0, sizeof(*req));
690 req->pool = pool; 684 req->pool = pool;
691 return req; 685 return req;
692} 686}
693 687
694static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool) 688static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
695{ 689{
696 struct zfcp_fsf_req_qtcb *qtcb; 690 struct fsf_qtcb *qtcb;
697 691
698 if (likely(pool)) 692 if (likely(pool))
699 qtcb = mempool_alloc(pool, GFP_ATOMIC); 693 qtcb = mempool_alloc(pool, GFP_ATOMIC);
700 else 694 else
701 qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache, 695 qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
702 GFP_ATOMIC); 696
703 if (unlikely(!qtcb)) 697 if (unlikely(!qtcb))
704 return NULL; 698 return NULL;
705 699
706 memset(qtcb, 0, sizeof(*qtcb)); 700 memset(qtcb, 0, sizeof(*qtcb));
707 qtcb->fsf_req.qtcb = &qtcb->qtcb; 701 return qtcb;
708 qtcb->fsf_req.pool = pool;
709
710 return &qtcb->fsf_req;
711} 702}
712 703
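Splitting the request from its QTCB means two allocations, both of which may happen in atomic context; the qtcb_pool/qtcb_cache split follows the usual slab-cache-plus-mempool pattern so that a minimum number of objects stays available under memory pressure. A self-contained sketch, with hypothetical names and an arbitrary pool size:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_qtcb { char payload[4096]; };	/* hypothetical fixed-size block */

static struct kmem_cache *my_qtcb_cache;
static mempool_t *my_qtcb_pool;

static int my_pools_init(void)
{
	my_qtcb_cache = kmem_cache_create("my_qtcb", sizeof(struct my_qtcb),
					  0, 0, NULL);
	if (!my_qtcb_cache)
		return -ENOMEM;
	/* The pool keeps 4 objects in reserve even under memory pressure. */
	my_qtcb_pool = mempool_create_slab_pool(4, my_qtcb_cache);
	if (!my_qtcb_pool) {
		kmem_cache_destroy(my_qtcb_cache);
		return -ENOMEM;
	}
	return 0;
}

/* Pool if the caller provided one (guaranteed progress), else the cache. */
static struct my_qtcb *my_qtcb_alloc(mempool_t *pool)
{
	struct my_qtcb *q;

	if (pool)
		q = mempool_alloc(pool, GFP_ATOMIC);
	else
		q = kmem_cache_alloc(my_qtcb_cache, GFP_ATOMIC);
	if (q)
		memset(q, 0, sizeof(*q));
	return q;
}

static void my_qtcb_free(struct my_qtcb *q, mempool_t *pool)
{
	if (pool)
		mempool_free(q, pool);
	else
		kmem_cache_free(my_qtcb_cache, q);
}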
713static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, 704static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
714 u32 fsf_cmd, int req_flags, 705 u32 fsf_cmd, mempool_t *pool)
715 mempool_t *pool)
716{ 706{
717 struct qdio_buffer_element *sbale; 707 struct qdio_buffer_element *sbale;
718 708 struct zfcp_qdio_queue *req_q = &qdio->req_q;
719 struct zfcp_fsf_req *req; 709 struct zfcp_adapter *adapter = qdio->adapter;
720 struct zfcp_qdio_queue *req_q = &adapter->req_q; 710 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
721
722 if (req_flags & ZFCP_REQ_NO_QTCB)
723 req = zfcp_fsf_alloc_noqtcb(pool);
724 else
725 req = zfcp_fsf_alloc_qtcb(pool);
726 711
727 if (unlikely(!req)) 712 if (unlikely(!req))
728 return ERR_PTR(-ENOMEM); 713 return ERR_PTR(-ENOMEM);
@@ -732,22 +717,32 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
732 717
733 INIT_LIST_HEAD(&req->list); 718 INIT_LIST_HEAD(&req->list);
734 init_timer(&req->timer); 719 init_timer(&req->timer);
735 init_waitqueue_head(&req->completion_wq); 720 init_completion(&req->completion);
736 721
737 req->adapter = adapter; 722 req->adapter = adapter;
738 req->fsf_command = fsf_cmd; 723 req->fsf_command = fsf_cmd;
739 req->req_id = adapter->req_no; 724 req->req_id = adapter->req_no;
740 req->sbal_number = 1; 725 req->queue_req.sbal_number = 1;
741 req->sbal_first = req_q->first; 726 req->queue_req.sbal_first = req_q->first;
742 req->sbal_last = req_q->first; 727 req->queue_req.sbal_last = req_q->first;
743 req->sbale_curr = 1; 728 req->queue_req.sbale_curr = 1;
744 729
745 sbale = zfcp_qdio_sbale_req(req); 730 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
746 sbale[0].addr = (void *) req->req_id; 731 sbale[0].addr = (void *) req->req_id;
747 sbale[0].flags |= SBAL_FLAGS0_COMMAND; 732 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
748 733
749 if (likely(req->qtcb)) { 734 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
750 req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no; 735 if (likely(pool))
736 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
737 else
738 req->qtcb = zfcp_qtcb_alloc(NULL);
739
740 if (unlikely(!req->qtcb)) {
741 zfcp_fsf_req_free(req);
742 return ERR_PTR(-ENOMEM);
743 }
744
745 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
751 req->qtcb->prefix.req_id = req->req_id; 746 req->qtcb->prefix.req_id = req->req_id;
752 req->qtcb->prefix.ulp_info = 26; 747 req->qtcb->prefix.ulp_info = 26;
753 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command]; 748 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
@@ -765,15 +760,13 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
765 return ERR_PTR(-EIO); 760 return ERR_PTR(-EIO);
766 } 761 }
767 762
768 if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP))
769 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
770
771 return req; 763 return req;
772} 764}
773 765
774static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) 766static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
775{ 767{
776 struct zfcp_adapter *adapter = req->adapter; 768 struct zfcp_adapter *adapter = req->adapter;
769 struct zfcp_qdio *qdio = adapter->qdio;
777 unsigned long flags; 770 unsigned long flags;
778 int idx; 771 int idx;
779 int with_qtcb = (req->qtcb != NULL); 772 int with_qtcb = (req->qtcb != NULL);
@@ -784,9 +777,9 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
784 list_add_tail(&req->list, &adapter->req_list[idx]); 777 list_add_tail(&req->list, &adapter->req_list[idx]);
785 spin_unlock_irqrestore(&adapter->req_list_lock, flags); 778 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
786 779
787 req->qdio_outb_usage = atomic_read(&adapter->req_q.count); 780 req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
788 req->issued = get_clock(); 781 req->issued = get_clock();
789 if (zfcp_qdio_send(req)) { 782 if (zfcp_qdio_send(qdio, &req->queue_req)) {
790 del_timer(&req->timer); 783 del_timer(&req->timer);
791 spin_lock_irqsave(&adapter->req_list_lock, flags); 784 spin_lock_irqsave(&adapter->req_list_lock, flags);
792 /* lookup request again, list might have changed */ 785 /* lookup request again, list might have changed */
@@ -811,38 +804,37 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
811 * @req_flags: request flags 804 * @qdio: pointer to struct zfcp_qdio
812 * Returns: 0 on success, ERROR otherwise 805 * Returns: 0 on success, ERROR otherwise
813 */ 806 */
814int zfcp_fsf_status_read(struct zfcp_adapter *adapter) 807int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
815{ 808{
809 struct zfcp_adapter *adapter = qdio->adapter;
816 struct zfcp_fsf_req *req; 810 struct zfcp_fsf_req *req;
817 struct fsf_status_read_buffer *sr_buf; 811 struct fsf_status_read_buffer *sr_buf;
818 struct qdio_buffer_element *sbale; 812 struct qdio_buffer_element *sbale;
819 int retval = -EIO; 813 int retval = -EIO;
820 814
821 spin_lock_bh(&adapter->req_q_lock); 815 spin_lock_bh(&qdio->req_q_lock);
822 if (zfcp_fsf_req_sbal_get(adapter)) 816 if (zfcp_fsf_req_sbal_get(qdio))
823 goto out; 817 goto out;
824 818
825 req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, 819 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
826 ZFCP_REQ_NO_QTCB, 820 adapter->pool.status_read_req);
827 adapter->pool.fsf_req_status_read);
828 if (IS_ERR(req)) { 821 if (IS_ERR(req)) {
829 retval = PTR_ERR(req); 822 retval = PTR_ERR(req);
830 goto out; 823 goto out;
831 } 824 }
832 825
833 sbale = zfcp_qdio_sbale_req(req); 826 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
834 sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
835 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; 827 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
836 req->sbale_curr = 2; 828 req->queue_req.sbale_curr = 2;
837 829
838 sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC); 830 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
839 if (!sr_buf) { 831 if (!sr_buf) {
840 retval = -ENOMEM; 832 retval = -ENOMEM;
841 goto failed_buf; 833 goto failed_buf;
842 } 834 }
843 memset(sr_buf, 0, sizeof(*sr_buf)); 835 memset(sr_buf, 0, sizeof(*sr_buf));
844 req->data = sr_buf; 836 req->data = sr_buf;
845 sbale = zfcp_qdio_sbale_curr(req); 837 sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
846 sbale->addr = (void *) sr_buf; 838 sbale->addr = (void *) sr_buf;
847 sbale->length = sizeof(*sr_buf); 839 sbale->length = sizeof(*sr_buf);
848 840
@@ -853,12 +845,12 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
853 goto out; 845 goto out;
854 846
855failed_req_send: 847failed_req_send:
856 mempool_free(sr_buf, adapter->pool.data_status_read); 848 mempool_free(sr_buf, adapter->pool.status_read_data);
857failed_buf: 849failed_buf:
858 zfcp_fsf_req_free(req); 850 zfcp_fsf_req_free(req);
859 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); 851 zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
860out: 852out:
861 spin_unlock_bh(&adapter->req_q_lock); 853 spin_unlock_bh(&qdio->req_q_lock);
862 return retval; 854 return retval;
863} 855}
864 856
@@ -900,7 +892,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
900 case FSF_ADAPTER_STATUS_AVAILABLE: 892 case FSF_ADAPTER_STATUS_AVAILABLE:
901 switch (fsq->word[0]) { 893 switch (fsq->word[0]) {
902 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 894 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
903 zfcp_test_link(unit->port); 895 zfcp_fc_test_link(unit->port);
904 /* fall through */ 896 /* fall through */
905 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 897 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
906 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 898 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -925,13 +917,13 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
-	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_qdio *qdio = unit->port->adapter->qdio;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
-				  0, adapter->pool.fsf_req_abort);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+				  qdio->adapter->pool.scsi_abort);
 	if (IS_ERR(req)) {
 		req = NULL;
 		goto out;
@@ -941,7 +933,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
 		goto out_error_free;
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -959,7 +951,7 @@ out_error_free:
 	zfcp_fsf_req_free(req);
 	req = NULL;
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return req;
 }
 
@@ -976,7 +968,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
 	case FSF_GOOD:
-		zfcp_san_dbf_event_ct_response(req);
+		zfcp_dbf_san_ct_response(req);
 		send_ct->status = 0;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1035,8 +1027,10 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 				       struct scatterlist *sg_resp,
 				       int max_sbals)
 {
-	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(req);
-	u32 feat = req->adapter->adapter_features;
+	struct zfcp_adapter *adapter = req->adapter;
+	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
+								&req->queue_req);
+	u32 feat = adapter->adapter_features;
 	int bytes;
 
 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
@@ -1053,18 +1047,25 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 		return 0;
 	}
 
-	bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_req, max_sbals);
 	if (bytes <= 0)
 		return -EIO;
 	req->qtcb->bottom.support.req_buf_length = bytes;
-	req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+	req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
 
-	bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_resp, max_sbals);
 	if (bytes <= 0)
 		return -EIO;
+
+	/* common settings for ct/gs and els requests */
 	req->qtcb->bottom.support.resp_buf_length = bytes;
+	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
+	req->qtcb->bottom.support.timeout = 2 * R_A_TOV;
+	zfcp_fsf_start_timer(req, 2 * R_A_TOV + 10);
 
 	return 0;
 }
@@ -1073,27 +1074,26 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
  * @ct: pointer to struct zfcp_send_ct with data for request
  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
- * @erp_action: if non-null the Generic Service request sent within ERP
  */
-int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
-		     struct zfcp_erp_action *erp_action)
+int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
 {
 	struct zfcp_wka_port *wka_port = ct->wka_port;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int ret = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
-				  ZFCP_REQ_AUTO_CLEANUP, pool);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
+
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
 		goto out;
 	}
 
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
 	ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
 					  FSF_MAX_SBALS_PER_REQ);
 	if (ret)
@@ -1101,18 +1101,9 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
 
 	req->handler = zfcp_fsf_send_ct_handler;
 	req->qtcb->header.port_handle = wka_port->handle;
-	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
-	req->qtcb->bottom.support.timeout = ct->timeout;
 	req->data = ct;
 
-	zfcp_san_dbf_event_ct_request(req);
-
-	if (erp_action) {
-		erp_action->fsf_req = req;
-		req->erp_action = erp_action;
-		zfcp_fsf_start_erp_timer(req);
-	} else
-		zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	zfcp_dbf_san_ct_request(req);
 
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
@@ -1122,10 +1113,8 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
 
 failed_send:
 	zfcp_fsf_req_free(req);
-	if (erp_action)
-		erp_action->fsf_req = NULL;
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return ret;
 }
 
@@ -1142,7 +1131,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
 	case FSF_GOOD:
-		zfcp_san_dbf_event_els_response(req);
+		zfcp_dbf_san_els_response(req);
 		send_els->status = 0;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1152,7 +1141,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
 		switch (header->fsf_status_qual.word[0]){
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			if (port && (send_els->ls_code != ZFCP_LS_ADISC))
-				zfcp_test_link(port);
+				zfcp_fc_test_link(port);
 			/*fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 		case FSF_SQ_RETRY_IF_POSSIBLE:
@@ -1188,35 +1177,32 @@ skip_fsfstatus:
 int zfcp_fsf_send_els(struct zfcp_send_els *els)
 {
 	struct zfcp_fsf_req *req;
-	struct zfcp_adapter *adapter = els->adapter;
-	struct fsf_qtcb_bottom_support *bottom;
+	struct zfcp_qdio *qdio = els->adapter->qdio;
 	int ret = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
-				  ZFCP_REQ_AUTO_CLEANUP, NULL);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
+
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
 		goto out;
 	}
 
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
 	ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
 
 	if (ret)
 		goto failed_send;
 
-	bottom = &req->qtcb->bottom.support;
+	req->qtcb->bottom.support.d_id = els->d_id;
 	req->handler = zfcp_fsf_send_els_handler;
-	bottom->d_id = els->d_id;
-	bottom->service_class = FSF_CLASS_3;
-	bottom->timeout = 2 * R_A_TOV;
 	req->data = els;
 
-	zfcp_san_dbf_event_els_request(req);
+	zfcp_dbf_san_els_request(req);
 
-	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
 		goto failed_send;
@@ -1226,7 +1212,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
 failed_send:
 	zfcp_fsf_req_free(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return ret;
 }
 
@@ -1234,22 +1220,23 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter,
-				  FSF_QTCB_EXCHANGE_CONFIG_DATA,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1269,29 +1256,29 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
-int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
+int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
 				       struct fsf_qtcb_bottom_config *data)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out_unlock;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
-				  0, NULL);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out_unlock;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 	req->handler = zfcp_fsf_exchange_config_data_handler;
@@ -1307,16 +1294,15 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
 
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	if (!retval)
-		wait_event(req->completion_wq,
-			   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+		wait_for_completion(&req->completion);
 
 	zfcp_fsf_req_free(req);
 	return retval;
 
 out_unlock:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1327,26 +1313,28 @@ out_unlock:
  */
 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 {
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req;
-	struct zfcp_adapter *adapter = erp_action->adapter;
 	int retval = -EIO;
 
-	if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
 		return -EOPNOTSUPP;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1361,32 +1349,32 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
 /**
  * zfcp_fsf_exchange_port_data_sync - request information about local port
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
  * @data: pointer to struct fsf_qtcb_bottom_port
  * Returns: 0 on success, error otherwise
  */
-int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
+int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
 				     struct fsf_qtcb_bottom_port *data)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	int retval = -EIO;
 
-	if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
 		return -EOPNOTSUPP;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out_unlock;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
-				  NULL);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out_unlock;
@@ -1395,24 +1383,24 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
 	if (data)
 		req->data = data;
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	req->handler = zfcp_fsf_exchange_port_data_handler;
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 
 	if (!retval)
-		wait_event(req->completion_wq,
-			   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+		wait_for_completion(&req->completion);
+
 	zfcp_fsf_req_free(req);
 
 	return retval;
 
 out_unlock:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1498,25 +1486,25 @@ out:
 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_fsf_req *req;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_port *port = erp_action->port;
+	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter,
-				  FSF_QTCB_OPEN_PORT_WITH_DID,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1535,7 +1523,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 		zfcp_port_put(port);
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1569,23 +1557,24 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1602,7 +1591,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1645,24 +1634,24 @@ out:
 int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter,
-				  FSF_QTCB_OPEN_PORT_WITH_DID,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  qdio->adapter->pool.erp_req);
+
 	if (unlikely(IS_ERR(req))) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1675,7 +1664,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
 	if (retval)
 		zfcp_fsf_req_free(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1700,23 +1689,24 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  qdio->adapter->pool.erp_req);
+
 	if (unlikely(IS_ERR(req))) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1729,7 +1719,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
 	if (retval)
 		zfcp_fsf_req_free(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1791,23 +1781,24 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1824,7 +1815,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1895,7 +1886,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			zfcp_test_link(unit->port);
+			zfcp_fc_test_link(unit->port);
 			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1964,22 +1955,24 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+				  adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -2000,7 +1993,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -2028,7 +2021,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			zfcp_test_link(unit->port);
+			zfcp_fc_test_link(unit->port);
 			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -2049,22 +2042,24 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -2082,7 +2077,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -2141,8 +2136,8 @@ static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
 	}
 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		trace.flags |= ZFCP_BLK_REQ_ERROR;
-	trace.inb_usage = fsf_req->qdio_inb_usage;
-	trace.outb_usage = fsf_req->qdio_outb_usage;
+	trace.inb_usage = fsf_req->queue_req.qdio_inb_usage;
+	trace.outb_usage = fsf_req->queue_req.qdio_outb_usage;
 
 	blk_add_driver_data(req->q, req, &trace, sizeof(trace));
 }
@@ -2215,11 +2210,11 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 	}
 skip_fsfstatus:
 	if (scpnt->result != 0)
-		zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req);
+		zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
 	else if (scpnt->retries > 0)
-		zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req);
+		zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
 	else
-		zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req);
+		zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
 
 	scpnt->host_scribble = NULL;
 	(scpnt->scsi_done) (scpnt);
@@ -2309,7 +2304,7 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		if (header->fsf_status_qual.word[0] ==
 		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
-			zfcp_test_link(unit->port);
+			zfcp_fc_test_link(unit->port);
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	}
@@ -2350,24 +2345,27 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 	unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
 	int real_bytes, retval = -EIO;
 	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
 
 	if (unlikely(!(atomic_read(&unit->status) &
 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
 		return -EBUSY;
 
-	spin_lock(&adapter->req_q_lock);
-	if (atomic_read(&adapter->req_q.count) <= 0) {
-		atomic_inc(&adapter->qdio_outb_full);
+	spin_lock(&qdio->req_q_lock);
+	if (atomic_read(&qdio->req_q.count) <= 0) {
+		atomic_inc(&qdio->req_q_full);
 		goto out;
 	}
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_scsi);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+				  adapter->pool.scsi_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
 	zfcp_unit_get(unit);
 	req->unit = unit;
 	req->data = scsi_cmnd;
@@ -2419,11 +2417,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 	req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
 		fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
 
-	real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
+	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
 					     scsi_sglist(scsi_cmnd),
 					     FSF_MAX_SBALS_PER_REQ);
 	if (unlikely(real_bytes < 0)) {
-		if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+		if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
 			dev_err(&adapter->ccw_device->dev,
 				"Oversize data package, unit 0x%016Lx "
 				"on port 0x%016Lx closed\n",
@@ -2448,7 +2446,7 @@ failed_scsi_cmnd:
 	zfcp_fsf_req_free(req);
 	scsi_cmnd->host_scribble = NULL;
 out:
-	spin_unlock(&adapter->req_q_lock);
+	spin_unlock(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -2463,17 +2461,19 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	struct fcp_cmnd_iu *fcp_cmnd_iu;
-	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_qdio *qdio = unit->port->adapter->qdio;
 
 	if (unlikely(!(atomic_read(&unit->status) &
 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
 		return NULL;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, 0,
-				  adapter->pool.fsf_req_scsi);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+				  qdio->adapter->pool.scsi_req);
+
 	if (IS_ERR(req)) {
 		req = NULL;
 		goto out;
@@ -2489,7 +2489,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
 	req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
 		sizeof(u32);
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -2504,7 +2504,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
 	zfcp_fsf_req_free(req);
 	req = NULL;
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return req;
 }
 
@@ -2522,6 +2522,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 					   struct zfcp_fsf_cfdc *fsf_cfdc)
 {
 	struct qdio_buffer_element *sbale;
+	struct zfcp_qdio *qdio = adapter->qdio;
 	struct zfcp_fsf_req *req = NULL;
 	struct fsf_qtcb_bottom_support *bottom;
 	int direction, retval = -EIO, bytes;
@@ -2540,11 +2541,11 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 		return ERR_PTR(-EINVAL);
 	}
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL);
+	req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
 	if (IS_ERR(req)) {
 		retval = -EPERM;
 		goto out;
@@ -2552,14 +2553,15 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 
 	req->handler = zfcp_fsf_control_file_handler;
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= direction;
 
 	bottom = &req->qtcb->bottom.support;
 	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
 	bottom->option = fsf_cfdc->option;
 
-	bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
+	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
+					direction, fsf_cfdc->sg,
 					FSF_MAX_SBALS_PER_REQ);
 	if (bytes != ZFCP_CFDC_MAX_SIZE) {
 		zfcp_fsf_req_free(req);
@@ -2569,12 +2571,53 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 
 	if (!retval) {
-		wait_event(req->completion_wq,
-			   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+		wait_for_completion(&req->completion);
 		return req;
 	}
 	return ERR_PTR(retval);
 }
+
+/**
+ * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
+ * @adapter: pointer to struct zfcp_adapter
+ * @sbal_idx: response queue index of SBAL to be processed
+ */
+void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
+	struct qdio_buffer_element *sbale;
+	struct zfcp_fsf_req *fsf_req;
+	unsigned long flags, req_id;
+	int idx;
+
+	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
+
+		sbale = &sbal->element[idx];
+		req_id = (unsigned long) sbale->addr;
+		spin_lock_irqsave(&adapter->req_list_lock, flags);
+		fsf_req = zfcp_reqlist_find(adapter, req_id);
+
+		if (!fsf_req)
+			/*
+			 * Unknown request means that we have potentially memory
+			 * corruption and must stop the machine immediately.
+			 */
+			panic("error: unknown req_id (%lx) on adapter %s.\n",
+			      req_id, dev_name(&adapter->ccw_device->dev));
+
+		list_del(&fsf_req->list);
+		spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+
+		fsf_req->queue_req.sbal_response = sbal_idx;
+		fsf_req->queue_req.qdio_inb_usage =
+			atomic_read(&qdio->resp_q.count);
+		zfcp_fsf_req_complete(fsf_req);
+
+		if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
+			break;
+	}
+}
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index df7f232faba8..dcc7c1dbcf58 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,13 +3,14 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
  */
 
 #ifndef FSF_H
 #define FSF_H
 
 #include <linux/pfn.h>
+#include <linux/scatterlist.h>
 
 #define FSF_QTCB_CURRENT_VERSION	0x00000001
 
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e0a215309df0..6c5228b627fc 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@
  *
  * Setup and helper functions to access QDIO.
  *
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -34,29 +34,10 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
 	return &q->sbal[sbal_idx]->element[sbale_idx];
 }
 
-/**
- * zfcp_qdio_free - free memory used by request- and resposne queue
- * @adapter: pointer to the zfcp_adapter structure
- */
-void zfcp_qdio_free(struct zfcp_adapter *adapter)
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
 {
-	struct qdio_buffer **sbal_req, **sbal_resp;
-	int p;
-
-	if (adapter->ccw_device)
-		qdio_free(adapter->ccw_device);
-
-	sbal_req = adapter->req_q.sbal;
-	sbal_resp = adapter->resp_q.sbal;
-
-	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
-		free_page((unsigned long) sbal_req[p]);
-		free_page((unsigned long) sbal_resp[p]);
-	}
-}
+	struct zfcp_adapter *adapter = qdio->adapter;
 
-static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id)
-{
 	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
 
 	zfcp_erp_adapter_reopen(adapter,
@@ -75,72 +56,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
 }
 
 /* this needs to be called prior to updating the queue fill level */
-static void zfcp_qdio_account(struct zfcp_adapter *adapter)
+static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
 {
-	ktime_t now;
-	s64 span;
+	unsigned long long now, span;
 	int free, used;
 
-	spin_lock(&adapter->qdio_stat_lock);
-	now = ktime_get();
-	span = ktime_us_delta(now, adapter->req_q_time);
-	free = max(0, atomic_read(&adapter->req_q.count));
+	spin_lock(&qdio->stat_lock);
+	now = get_clock_monotonic();
+	span = (now - qdio->req_q_time) >> 12;
+	free = atomic_read(&qdio->req_q.count);
 	used = QDIO_MAX_BUFFERS_PER_Q - free;
-	adapter->req_q_util += used * span;
-	adapter->req_q_time = now;
-	spin_unlock(&adapter->qdio_stat_lock);
+	qdio->req_q_util += used * span;
+	qdio->req_q_time = now;
+	spin_unlock(&qdio->stat_lock);
 }
 
 static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
 			      int queue_no, int first, int count,
 			      unsigned long parm)
 {
-	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
-	struct zfcp_qdio_queue *queue = &adapter->req_q;
+	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+	struct zfcp_qdio_queue *queue = &qdio->req_q;
 
 	if (unlikely(qdio_err)) {
-		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
-		zfcp_qdio_handler_error(adapter, "qdireq1");
+		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
+				  count);
+		zfcp_qdio_handler_error(qdio, "qdireq1");
 		return;
 	}
 
 	/* cleanup all SBALs being program-owned now */
 	zfcp_qdio_zero_sbals(queue->sbal, first, count);
 
-	zfcp_qdio_account(adapter);
+	zfcp_qdio_account(qdio);
 	atomic_add(count, &queue->count);
-	wake_up(&adapter->request_wq);
-}
-
-static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
-				  unsigned long req_id, int sbal_idx)
-{
-	struct zfcp_fsf_req *fsf_req;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->req_list_lock, flags);
-	fsf_req = zfcp_reqlist_find(adapter, req_id);
-
-	if (!fsf_req)
-		/*
-		 * Unknown request means that we have potentially memory
-		 * corruption and must stop the machine immediatly.
-		 */
-		panic("error: unknown request id (%lx) on adapter %s.\n",
-		      req_id, dev_name(&adapter->ccw_device->dev));
-
-	zfcp_reqlist_remove(adapter, fsf_req);
-	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-
-	fsf_req->sbal_response = sbal_idx;
-	fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count);
-	zfcp_fsf_req_complete(fsf_req);
+	wake_up(&qdio->req_q_wq);
 }
 
-static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
+static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
 {
-	struct zfcp_qdio_queue *queue = &adapter->resp_q;
-	struct ccw_device *cdev = adapter->ccw_device;
+	struct zfcp_qdio_queue *queue = &qdio->resp_q;
+	struct ccw_device *cdev = qdio->adapter->ccw_device;
 	u8 count, start = queue->first;
 	unsigned int retval;
 
@@ -162,14 +118,13 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 			       int queue_no, int first, int count,
 			       unsigned long parm)
 {
-	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
-	struct zfcp_qdio_queue *queue = &adapter->resp_q;
-	struct qdio_buffer_element *sbale;
-	int sbal_idx, sbale_idx, sbal_no;
+	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+	int sbal_idx, sbal_no;
 
 	if (unlikely(qdio_err)) {
-		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
-		zfcp_qdio_handler_error(adapter, "qdires1");
+		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
+				  count);
+		zfcp_qdio_handler_error(qdio, "qdires1");
 		return;
 	}
 
@@ -179,39 +134,27 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 	 */
 	for (sbal_no = 0; sbal_no < count; sbal_no++) {
 		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
-
 		/* go through all SBALEs of SBAL */
-		for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
-		     sbale_idx++) {
-			sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
-			zfcp_qdio_reqid_check(adapter,
-					      (unsigned long) sbale->addr,
-					      sbal_idx);
-			if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
-				break;
-		};
-
-		if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
-			dev_warn(&adapter->ccw_device->dev,
-				 "A QDIO protocol error occurred, "
-				 "operations continue\n");
+		zfcp_fsf_reqid_check(qdio, sbal_idx);
 	}
 
 	/*
 	 * put range of SBALs back to response queue
 	 * (including SBALs which have already been free before)
 	 */
-	zfcp_qdio_resp_put_back(adapter, count);
+	zfcp_qdio_resp_put_back(qdio, count);
 }
 
 /**
  * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
- * @fsf_req: pointer to struct fsf_req
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_rec: pointer to struct zfcp_queue_rec
  * Returns: pointer to qdio_buffer_element (SBALE) structure
  */
-struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
+struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio,
+						struct zfcp_queue_req *q_req)
 {
-	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
+	return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
 }
 
 /**
@@ -219,74 +162,80 @@ struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
219 * @fsf_req: pointer to struct fsf_req 162 * @fsf_req: pointer to struct fsf_req
220 * Returns: pointer to qdio_buffer_element (SBALE) structure 163 * Returns: pointer to qdio_buffer_element (SBALE) structure
221 */ 164 */
222struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req) 165struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio,
166 struct zfcp_queue_req *q_req)
223{ 167{
224 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 168 return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
225 req->sbale_curr); 169 q_req->sbale_curr);
226} 170}
227 171
228static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) 172static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
173 struct zfcp_queue_req *q_req, int max_sbals)
229{ 174{
230 int count = atomic_read(&fsf_req->adapter->req_q.count); 175 int count = atomic_read(&qdio->req_q.count);
231 count = min(count, max_sbals); 176 count = min(count, max_sbals);
232 fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1) 177 q_req->sbal_limit = (q_req->sbal_first + count - 1)
233 % QDIO_MAX_BUFFERS_PER_Q; 178 % QDIO_MAX_BUFFERS_PER_Q;
234} 179}
235 180
236static struct qdio_buffer_element * 181static struct qdio_buffer_element *
237zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 182zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
183 unsigned long sbtype)
238{ 184{
239 struct qdio_buffer_element *sbale; 185 struct qdio_buffer_element *sbale;
240 186
241 /* set last entry flag in current SBALE of current SBAL */ 187 /* set last entry flag in current SBALE of current SBAL */
242 sbale = zfcp_qdio_sbale_curr(fsf_req); 188 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
243 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 189 sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
244 190
245 /* don't exceed last allowed SBAL */ 191 /* don't exceed last allowed SBAL */
246 if (fsf_req->sbal_last == fsf_req->sbal_limit) 192 if (q_req->sbal_last == q_req->sbal_limit)
247 return NULL; 193 return NULL;
248 194
249 /* set chaining flag in first SBALE of current SBAL */ 195 /* set chaining flag in first SBALE of current SBAL */
250 sbale = zfcp_qdio_sbale_req(fsf_req); 196 sbale = zfcp_qdio_sbale_req(qdio, q_req);
251 sbale->flags |= SBAL_FLAGS0_MORE_SBALS; 197 sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
252 198
253 /* calculate index of next SBAL */ 199 /* calculate index of next SBAL */
254 fsf_req->sbal_last++; 200 q_req->sbal_last++;
255 fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; 201 q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
256 202
257 /* keep this requests number of SBALs up-to-date */ 203 /* keep this requests number of SBALs up-to-date */
258 fsf_req->sbal_number++; 204 q_req->sbal_number++;
259 205
260 /* start at first SBALE of new SBAL */ 206 /* start at first SBALE of new SBAL */
261 fsf_req->sbale_curr = 0; 207 q_req->sbale_curr = 0;
262 208
263 /* set storage-block type for new SBAL */ 209 /* set storage-block type for new SBAL */
264 sbale = zfcp_qdio_sbale_curr(fsf_req); 210 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
265 sbale->flags |= sbtype; 211 sbale->flags |= sbtype;
266 212
267 return sbale; 213 return sbale;
268} 214}
269 215
270static struct qdio_buffer_element * 216static struct qdio_buffer_element *
271zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 217zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
218 unsigned int sbtype)
272{ 219{
273 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 220 if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
274 return zfcp_qdio_sbal_chain(fsf_req, sbtype); 221 return zfcp_qdio_sbal_chain(qdio, q_req, sbtype);
275 fsf_req->sbale_curr++; 222 q_req->sbale_curr++;
276 return zfcp_qdio_sbale_curr(fsf_req); 223 return zfcp_qdio_sbale_curr(qdio, q_req);
277} 224}
278 225
279static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req) 226static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
227 struct zfcp_queue_req *q_req)
280{ 228{
281 struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal; 229 struct qdio_buffer **sbal = qdio->req_q.sbal;
282 int first = fsf_req->sbal_first; 230 int first = q_req->sbal_first;
283 int last = fsf_req->sbal_last; 231 int last = q_req->sbal_last;
284 int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % 232 int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
285 QDIO_MAX_BUFFERS_PER_Q + 1; 233 QDIO_MAX_BUFFERS_PER_Q + 1;
286 zfcp_qdio_zero_sbals(sbal, first, count); 234 zfcp_qdio_zero_sbals(sbal, first, count);
287} 235}
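
The count expression deals with requests that wrapped around the ring, where last is numerically smaller than first. A sketch of the distance calculation (illustrative, not zfcp code):

    #define QDIO_MAX_BUFFERS_PER_Q 128

    /* Number of SBALs from 'first' through 'last' inclusive on a ring;
     * adding the ring size before the modulo keeps the subtraction
     * non-negative when the range wraps. */
    static int ring_span(int first, int last)
    {
            return (last - first + QDIO_MAX_BUFFERS_PER_Q)
                    % QDIO_MAX_BUFFERS_PER_Q + 1;
    }

    /* ring_span(126, 1) == 4: SBALs 126, 127, 0, 1 are zeroed */
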
288 236
289static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, 237static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
238 struct zfcp_queue_req *q_req,
290 unsigned int sbtype, void *start_addr, 239 unsigned int sbtype, void *start_addr,
291 unsigned int total_length) 240 unsigned int total_length)
292{ 241{
@@ -297,10 +246,10 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
297 /* split segment up */ 246 /* split segment up */
298 for (addr = start_addr, remaining = total_length; remaining > 0; 247 for (addr = start_addr, remaining = total_length; remaining > 0;
299 addr += length, remaining -= length) { 248 addr += length, remaining -= length) {
300 sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); 249 sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype);
301 if (!sbale) { 250 if (!sbale) {
302 atomic_inc(&fsf_req->adapter->qdio_outb_full); 251 atomic_inc(&qdio->req_q_full);
303 zfcp_qdio_undo_sbals(fsf_req); 252 zfcp_qdio_undo_sbals(qdio, q_req);
304 return -EINVAL; 253 return -EINVAL;
305 } 254 }
306 255
@@ -322,29 +271,31 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
322 * @max_sbals: upper bound for number of SBALs to be used 271 * @max_sbals: upper bound for number of SBALs to be used
323 * Returns: number of bytes, or error (negative) 272 * Returns: number of bytes, or error (negative)
324 */ 273 */
325int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 274int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
326 struct scatterlist *sg, int max_sbals) 275 struct zfcp_queue_req *q_req,
276 unsigned long sbtype, struct scatterlist *sg,
277 int max_sbals)
327{ 278{
328 struct qdio_buffer_element *sbale; 279 struct qdio_buffer_element *sbale;
329 int retval, bytes = 0; 280 int retval, bytes = 0;
330 281
331 /* figure out last allowed SBAL */ 282 /* figure out last allowed SBAL */
332 zfcp_qdio_sbal_limit(fsf_req, max_sbals); 283 zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
333 284
334 /* set storage-block type for this request */ 285 /* set storage-block type for this request */
335 sbale = zfcp_qdio_sbale_req(fsf_req); 286 sbale = zfcp_qdio_sbale_req(qdio, q_req);
336 sbale->flags |= sbtype; 287 sbale->flags |= sbtype;
337 288
338 for (; sg; sg = sg_next(sg)) { 289 for (; sg; sg = sg_next(sg)) {
339 retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg), 290 retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
340 sg->length); 291 sg_virt(sg), sg->length);
341 if (retval < 0) 292 if (retval < 0)
342 return retval; 293 return retval;
343 bytes += sg->length; 294 bytes += sg->length;
344 } 295 }
345 296
346 /* assume that no other SBALEs are to follow in the same SBAL */ 297 /* assume that no other SBALEs are to follow in the same SBAL */
347 sbale = zfcp_qdio_sbale_curr(fsf_req); 298 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
348 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 299 sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
349 300
350 return bytes; 301 return bytes;
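
With the new signature a caller passes the qdio context and the per-request queue state explicitly instead of a whole zfcp_fsf_req. A hypothetical FCP-style caller might look as follows; this is a sketch that assumes the fsf request embeds a struct zfcp_queue_req named queue_req (as elsewhere in this series), with max_sbals being whatever bound the request type allows:

    int bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
                                        SBAL_FLAGS0_TYPE_WRITE_READ,
                                        scsi_sglist(scpnt), max_sbals);
    if (bytes < 0)
            return -EIO;    /* did not fit; req_q_full was accounted */
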
@@ -352,21 +303,22 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
352 303
353/** 304/**
354 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO 305 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
355 * @fsf_req: pointer to struct zfcp_fsf_req 306 * @qdio: pointer to struct zfcp_qdio
307 * @q_req: pointer to struct zfcp_queue_req
356 * Returns: 0 on success, error otherwise 308 * Returns: 0 on success, error otherwise
357 */ 309 */
358int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) 310int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req)
359{ 311{
360 struct zfcp_adapter *adapter = fsf_req->adapter; 312 struct zfcp_qdio_queue *req_q = &qdio->req_q;
361 struct zfcp_qdio_queue *req_q = &adapter->req_q; 313 int first = q_req->sbal_first;
362 int first = fsf_req->sbal_first; 314 int count = q_req->sbal_number;
363 int count = fsf_req->sbal_number;
364 int retval; 315 int retval;
365 unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; 316 unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
366 317
367 zfcp_qdio_account(adapter); 318 zfcp_qdio_account(qdio);
368 319
369 retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count); 320 retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first,
321 count);
370 if (unlikely(retval)) { 322 if (unlikely(retval)) {
371 zfcp_qdio_zero_sbals(req_q->sbal, first, count); 323 zfcp_qdio_zero_sbals(req_q->sbal, first, count);
372 return retval; 324 return retval;
@@ -379,63 +331,69 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
379 return 0; 331 return 0;
380} 332}
381 333
334
335static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
336 struct zfcp_qdio *qdio)
337{
338
339 id->cdev = qdio->adapter->ccw_device;
340 id->q_format = QDIO_ZFCP_QFMT;
341 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
342 ASCEBC(id->adapter_name, 8);
343 id->qib_param_field_format = 0;
344 id->qib_param_field = NULL;
345 id->input_slib_elements = NULL;
346 id->output_slib_elements = NULL;
347 id->no_input_qs = 1;
348 id->no_output_qs = 1;
349 id->input_handler = zfcp_qdio_int_resp;
350 id->output_handler = zfcp_qdio_int_req;
351 id->int_parm = (unsigned long) qdio;
352 id->flags = QDIO_INBOUND_0COPY_SBALS |
353 QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
354 id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
355 id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);
356
357}
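
Factoring the descriptor setup into zfcp_qdio_setup_init_data() lets both call sites build a struct qdio_initialize on the stack rather than keeping a copy in the adapter, as the old qdio_init_data member did. The shared usage, condensed from the two callers below (sketch):

    struct qdio_initialize init_data;

    zfcp_qdio_setup_init_data(&init_data, qdio);
    if (qdio_allocate(&init_data))      /* once, at setup time */
            return -ENOMEM;
    /* ... and again on every adapter open ... */
    zfcp_qdio_setup_init_data(&init_data, qdio);
    if (qdio_establish(&init_data))
            goto failed_establish;
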
382/** 358/**
383 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data 359 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
384 * @adapter: pointer to struct zfcp_adapter 360 * @qdio: pointer to struct zfcp_qdio
385 * Returns: -ENOMEM on memory allocation error or return value from 361 * Returns: -ENOMEM on memory allocation error or return value from
386 * qdio_allocate 362 * qdio_allocate
387 */ 363 */
388int zfcp_qdio_allocate(struct zfcp_adapter *adapter) 364static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
389{ 365{
390 struct qdio_initialize *init_data; 366 struct qdio_initialize init_data;
391 367
392 if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) || 368 if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) ||
393 zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal)) 369 zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal))
394 return -ENOMEM; 370 return -ENOMEM;
395 371
396 init_data = &adapter->qdio_init_data; 372 zfcp_qdio_setup_init_data(&init_data, qdio);
397 373
398 init_data->cdev = adapter->ccw_device; 374 return qdio_allocate(&init_data);
399 init_data->q_format = QDIO_ZFCP_QFMT;
400 memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8);
401 ASCEBC(init_data->adapter_name, 8);
402 init_data->qib_param_field_format = 0;
403 init_data->qib_param_field = NULL;
404 init_data->input_slib_elements = NULL;
405 init_data->output_slib_elements = NULL;
406 init_data->no_input_qs = 1;
407 init_data->no_output_qs = 1;
408 init_data->input_handler = zfcp_qdio_int_resp;
409 init_data->output_handler = zfcp_qdio_int_req;
410 init_data->int_parm = (unsigned long) adapter;
411 init_data->flags = QDIO_INBOUND_0COPY_SBALS |
412 QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
413 init_data->input_sbal_addr_array =
414 (void **) (adapter->resp_q.sbal);
415 init_data->output_sbal_addr_array =
416 (void **) (adapter->req_q.sbal);
417
418 return qdio_allocate(init_data);
419} 375}
420 376
421/** 377/**
422 * zfcp_qdio_close - close qdio queues for an adapter 378 * zfcp_qdio_close - close qdio queues for an adapter
379 * @qdio: pointer to struct zfcp_qdio
423 */ 380 */
424void zfcp_qdio_close(struct zfcp_adapter *adapter) 381void zfcp_qdio_close(struct zfcp_qdio *qdio)
425{ 382{
426 struct zfcp_qdio_queue *req_q; 383 struct zfcp_qdio_queue *req_q;
427 int first, count; 384 int first, count;
428 385
429 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 386 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
430 return; 387 return;
431 388
432 /* clear QDIOUP flag, so that do_QDIO is not called during qdio_shutdown */ 389 /* clear QDIOUP flag, so that do_QDIO is not called during qdio_shutdown */
433 req_q = &adapter->req_q; 390 req_q = &qdio->req_q;
434 spin_lock_bh(&adapter->req_q_lock); 391 spin_lock_bh(&qdio->req_q_lock);
435 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 392 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
436 spin_unlock_bh(&adapter->req_q_lock); 393 spin_unlock_bh(&qdio->req_q_lock);
437 394
438 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); 395 qdio_shutdown(qdio->adapter->ccw_device,
396 QDIO_FLAG_CLEANUP_USING_CLEAR);
439 397
440 /* cleanup used outbound sbals */ 398 /* cleanup used outbound sbals */
441 count = atomic_read(&req_q->count); 399 count = atomic_read(&req_q->count);
@@ -446,50 +404,99 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
446 } 404 }
447 req_q->first = 0; 405 req_q->first = 0;
448 atomic_set(&req_q->count, 0); 406 atomic_set(&req_q->count, 0);
449 adapter->resp_q.first = 0; 407 qdio->resp_q.first = 0;
450 atomic_set(&adapter->resp_q.count, 0); 408 atomic_set(&qdio->resp_q.count, 0);
451} 409}
452 410
453/** 411/**
454 * zfcp_qdio_open - prepare and initialize response queue 412 * zfcp_qdio_open - prepare and initialize response queue
455 * @adapter: pointer to struct zfcp_adapter 413 * @qdio: pointer to struct zfcp_qdio
456 * Returns: 0 on success, otherwise -EIO 414 * Returns: 0 on success, otherwise -EIO
457 */ 415 */
458int zfcp_qdio_open(struct zfcp_adapter *adapter) 416int zfcp_qdio_open(struct zfcp_qdio *qdio)
459{ 417{
460 struct qdio_buffer_element *sbale; 418 struct qdio_buffer_element *sbale;
419 struct qdio_initialize init_data;
420 struct ccw_device *cdev = qdio->adapter->ccw_device;
461 int cc; 421 int cc;
462 422
463 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) 423 if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
464 return -EIO; 424 return -EIO;
465 425
466 if (qdio_establish(&adapter->qdio_init_data)) 426 zfcp_qdio_setup_init_data(&init_data, qdio);
427
428 if (qdio_establish(&init_data))
467 goto failed_establish; 429 goto failed_establish;
468 430
469 if (qdio_activate(adapter->ccw_device)) 431 if (qdio_activate(cdev))
470 goto failed_qdio; 432 goto failed_qdio;
471 433
472 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 434 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
473 sbale = &(adapter->resp_q.sbal[cc]->element[0]); 435 sbale = &(qdio->resp_q.sbal[cc]->element[0]);
474 sbale->length = 0; 436 sbale->length = 0;
475 sbale->flags = SBAL_FLAGS_LAST_ENTRY; 437 sbale->flags = SBAL_FLAGS_LAST_ENTRY;
476 sbale->addr = NULL; 438 sbale->addr = NULL;
477 } 439 }
478 440
479 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, 441 if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
480 QDIO_MAX_BUFFERS_PER_Q)) 442 QDIO_MAX_BUFFERS_PER_Q))
481 goto failed_qdio; 443 goto failed_qdio;
482 444
483 /* set index of first available SBAL / number of available SBALs */ 445 /* set index of first available SBAL / number of available SBALs */
484 adapter->req_q.first = 0; 446 qdio->req_q.first = 0;
485 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); 447 atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
486 448
487 return 0; 449 return 0;
488 450
489failed_qdio: 451failed_qdio:
490 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); 452 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
491failed_establish: 453failed_establish:
492 dev_err(&adapter->ccw_device->dev, 454 dev_err(&cdev->dev,
493 "Setting up the QDIO connection to the FCP adapter failed\n"); 455 "Setting up the QDIO connection to the FCP adapter failed\n");
494 return -EIO; 456 return -EIO;
495} 457}
458
459void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
460{
461 struct qdio_buffer **sbal_req, **sbal_resp;
462 int p;
463
464 if (!qdio)
465 return;
466
467 if (qdio->adapter->ccw_device)
468 qdio_free(qdio->adapter->ccw_device);
469
470 sbal_req = qdio->req_q.sbal;
471 sbal_resp = qdio->resp_q.sbal;
472
473 for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
474 free_page((unsigned long) sbal_req[p]);
475 free_page((unsigned long) sbal_resp[p]);
476 }
477
478 kfree(qdio);
479}
480
481int zfcp_qdio_setup(struct zfcp_adapter *adapter)
482{
483 struct zfcp_qdio *qdio;
484
485 qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
486 if (!qdio)
487 return -ENOMEM;
488
489 qdio->adapter = adapter;
490
491 if (zfcp_qdio_allocate(qdio)) {
492 zfcp_qdio_destroy(qdio);
493 return -ENOMEM;
494 }
495
496 spin_lock_init(&qdio->req_q_lock);
497 spin_lock_init(&qdio->stat_lock);
498
499 adapter->qdio = qdio;
500 return 0;
501}
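
Taken together, the new entry points give the qdio context a simple lifecycle. A hypothetical bring-up and tear-down sequence (error handling abbreviated; assumes an already-initialized zfcp_adapter):

    if (zfcp_qdio_setup(adapter))         /* kzalloc + qdio_allocate */
            return -ENOMEM;
    if (zfcp_qdio_open(adapter->qdio)) {  /* establish + activate */
            zfcp_qdio_destroy(adapter->qdio);
            return -EIO;
    }
    /* ... adapter performs I/O ... */
    zfcp_qdio_close(adapter->qdio);       /* quiesce and drain the queues */
    zfcp_qdio_destroy(adapter->qdio);     /* free SBAL pages and the context */
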
502
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 6925a1784682..3ff726afafc6 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -9,8 +9,9 @@
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include "zfcp_ext.h"
13#include <asm/atomic.h> 12#include <asm/atomic.h>
13#include "zfcp_ext.h"
14#include "zfcp_dbf.h"
14 15
15static unsigned int default_depth = 32; 16static unsigned int default_depth = 32;
16module_param_named(queue_depth, default_depth, uint, 0600); 17module_param_named(queue_depth, default_depth, uint, 0600);
@@ -52,11 +53,11 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
52 53
53static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) 54static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
54{ 55{
56 struct zfcp_adapter *adapter =
57 (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
55 set_host_byte(scpnt, result); 58 set_host_byte(scpnt, result);
56 if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) 59 if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
57 zfcp_scsi_dbf_event_result("fail", 4, 60 zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
58 (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
59 scpnt, NULL);
60 /* return directly */ 61 /* return directly */
61 scpnt->scsi_done(scpnt); 62 scpnt->scsi_done(scpnt);
62} 63}
@@ -92,7 +93,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
92 scsi_result = fc_remote_port_chkready(rport); 93 scsi_result = fc_remote_port_chkready(rport);
93 if (unlikely(scsi_result)) { 94 if (unlikely(scsi_result)) {
94 scpnt->result = scsi_result; 95 scpnt->result = scsi_result;
95 zfcp_scsi_dbf_event_result("fail", 4, adapter, scpnt, NULL); 96 zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
96 scpnt->scsi_done(scpnt); 97 scpnt->scsi_done(scpnt);
97 return 0; 98 return 0;
98 } 99 }
@@ -180,8 +181,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
180 spin_unlock(&adapter->req_list_lock); 181 spin_unlock(&adapter->req_list_lock);
181 if (!old_req) { 182 if (!old_req) {
182 write_unlock_irqrestore(&adapter->abort_lock, flags); 183 write_unlock_irqrestore(&adapter->abort_lock, flags);
183 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 184 zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
184 old_reqid); 185 old_reqid);
185 return FAILED; /* completion could be in progress */ 186 return FAILED; /* completion could be in progress */
186 } 187 }
187 old_req->data = NULL; 188 old_req->data = NULL;
@@ -197,16 +198,15 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
197 zfcp_erp_wait(adapter); 198 zfcp_erp_wait(adapter);
198 if (!(atomic_read(&adapter->status) & 199 if (!(atomic_read(&adapter->status) &
199 ZFCP_STATUS_COMMON_RUNNING)) { 200 ZFCP_STATUS_COMMON_RUNNING)) {
200 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, 201 zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
201 old_reqid); 202 old_reqid);
202 return SUCCESS; 203 return SUCCESS;
203 } 204 }
204 } 205 }
205 if (!abrt_req) 206 if (!abrt_req)
206 return FAILED; 207 return FAILED;
207 208
208 wait_event(abrt_req->completion_wq, 209 wait_for_completion(&abrt_req->completion);
209 abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
210 210
211 if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) 211 if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
212 dbf_tag = "okay"; 212 dbf_tag = "okay";
@@ -216,7 +216,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
216 dbf_tag = "fail"; 216 dbf_tag = "fail";
217 retval = FAILED; 217 retval = FAILED;
218 } 218 }
219 zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid); 219 zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid);
220 zfcp_fsf_req_free(abrt_req); 220 zfcp_fsf_req_free(abrt_req);
221 return retval; 221 return retval;
222} 222}
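
The handler now blocks on the struct completion embedded in the request instead of re-checking a status flag with wait_event(). The underlying idiom (a sketch; zfcp initializes and completes the completion in its request setup and response paths):

    #include <linux/completion.h>

    /* requester side */
    init_completion(&req->completion);
    /* ... hand the request to the hardware ... */
    wait_for_completion(&req->completion);

    /* response side, when the answer arrives */
    complete(&req->completion);
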
@@ -225,7 +225,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
225{ 225{
226 struct zfcp_unit *unit = scpnt->device->hostdata; 226 struct zfcp_unit *unit = scpnt->device->hostdata;
227 struct zfcp_adapter *adapter = unit->port->adapter; 227 struct zfcp_adapter *adapter = unit->port->adapter;
228 struct zfcp_fsf_req *fsf_req; 228 struct zfcp_fsf_req *fsf_req = NULL;
229 int retval = SUCCESS; 229 int retval = SUCCESS;
230 int retry = 3; 230 int retry = 3;
231 231
@@ -237,25 +237,23 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
237 zfcp_erp_wait(adapter); 237 zfcp_erp_wait(adapter);
238 if (!(atomic_read(&adapter->status) & 238 if (!(atomic_read(&adapter->status) &
239 ZFCP_STATUS_COMMON_RUNNING)) { 239 ZFCP_STATUS_COMMON_RUNNING)) {
240 zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, 240 zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
241 scpnt);
242 return SUCCESS; 241 return SUCCESS;
243 } 242 }
244 } 243 }
245 if (!fsf_req) 244 if (!fsf_req)
246 return FAILED; 245 return FAILED;
247 246
248 wait_event(fsf_req->completion_wq, 247 wait_for_completion(&fsf_req->completion);
249 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
250 248
251 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { 249 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
252 zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt); 250 zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
253 retval = FAILED; 251 retval = FAILED;
254 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) { 252 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
255 zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt); 253 zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt);
256 retval = FAILED; 254 retval = FAILED;
257 } else 255 } else
258 zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt); 256 zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
259 257
260 zfcp_fsf_req_free(fsf_req); 258 zfcp_fsf_req_free(fsf_req);
261 return retval; 259 return retval;
@@ -430,7 +428,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
430 if (!data) 428 if (!data)
431 return NULL; 429 return NULL;
432 430
433 ret = zfcp_fsf_exchange_port_data_sync(adapter, data); 431 ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
434 if (ret) { 432 if (ret) {
435 kfree(data); 433 kfree(data);
436 return NULL; 434 return NULL;
@@ -459,7 +457,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
459 if (!data) 457 if (!data)
460 return; 458 return;
461 459
462 ret = zfcp_fsf_exchange_port_data_sync(adapter, data); 460 ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
463 if (ret) 461 if (ret)
464 kfree(data); 462 kfree(data);
465 else { 463 else {
@@ -493,21 +491,6 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
493} 491}
494 492
495/** 493/**
496 * zfcp_scsi_dev_loss_tmo_callbk - Free any reference to rport
497 * @rport: The rport that is about to be deleted.
498 */
499static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport)
500{
501 struct zfcp_port *port;
502
503 write_lock_irq(&zfcp_data.config_lock);
504 port = rport->dd_data;
505 if (port)
506 port->rport = NULL;
507 write_unlock_irq(&zfcp_data.config_lock);
508}
509
510/**
511 * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport 494 * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
512 * @rport: The FC rport on which to terminate I/O 495 * @rport: The FC rport on which to terminate I/O
513 * 496 *
@@ -518,9 +501,12 @@ static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport)
518static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) 501static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
519{ 502{
520 struct zfcp_port *port; 503 struct zfcp_port *port;
504 struct Scsi_Host *shost = rport_to_shost(rport);
505 struct zfcp_adapter *adapter =
506 (struct zfcp_adapter *)shost->hostdata[0];
521 507
522 write_lock_irq(&zfcp_data.config_lock); 508 write_lock_irq(&zfcp_data.config_lock);
523 port = rport->dd_data; 509 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
524 if (port) 510 if (port)
525 zfcp_port_get(port); 511 zfcp_port_get(port);
526 write_unlock_irq(&zfcp_data.config_lock); 512 write_unlock_irq(&zfcp_data.config_lock);
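
Since rport->dd_data no longer caches the zfcp_port (the assignment and the dev_loss_tmo callback that cleared it are removed elsewhere in this patch), the port is resolved by WWPN and pinned before the config lock is dropped. The idiom, condensed (sketch):

    write_lock_irq(&zfcp_data.config_lock);
    port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
    if (port)
            zfcp_port_get(port);    /* hold a reference across the unlock */
    write_unlock_irq(&zfcp_data.config_lock);
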
@@ -552,7 +538,6 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
552 return; 538 return;
553 } 539 }
554 540
555 rport->dd_data = port;
556 rport->maxframe_size = port->maxframe_size; 541 rport->maxframe_size = port->maxframe_size;
557 rport->supported_classes = port->supported_classes; 542 rport->supported_classes = port->supported_classes;
558 port->rport = rport; 543 port->rport = rport;
@@ -573,7 +558,7 @@ void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
573 zfcp_port_get(port); 558 zfcp_port_get(port);
574 port->rport_task = RPORT_ADD; 559 port->rport_task = RPORT_ADD;
575 560
576 if (!queue_work(zfcp_data.work_queue, &port->rport_work)) 561 if (!queue_work(port->adapter->work_queue, &port->rport_work))
577 zfcp_port_put(port); 562 zfcp_port_put(port);
578} 563}
579 564
@@ -582,8 +567,11 @@ void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
582 zfcp_port_get(port); 567 zfcp_port_get(port);
583 port->rport_task = RPORT_DEL; 568 port->rport_task = RPORT_DEL;
584 569
585 if (!queue_work(zfcp_data.work_queue, &port->rport_work)) 570 if (port->rport && queue_work(port->adapter->work_queue,
586 zfcp_port_put(port); 571 &port->rport_work))
572 return;
573
574 zfcp_port_put(port);
587} 575}
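
Both schedule helpers follow the same reference rule: take a port reference on behalf of the worker and drop it whenever the work item is not actually queued, as with the added rport check above. Condensed (sketch):

    zfcp_port_get(port);                  /* reference owned by the worker */
    if (port->rport && queue_work(wq, &port->rport_work))
            return;                       /* the worker will drop it */
    zfcp_port_put(port);                  /* not queued: drop it here */
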
588 576
589void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) 577void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
@@ -662,7 +650,6 @@ struct fc_function_template zfcp_transport_functions = {
662 .reset_fc_host_stats = zfcp_reset_fc_host_stats, 650 .reset_fc_host_stats = zfcp_reset_fc_host_stats,
663 .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo, 651 .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
664 .get_host_port_state = zfcp_get_host_port_state, 652 .get_host_port_state = zfcp_get_host_port_state,
665 .dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk,
666 .terminate_rport_io = zfcp_scsi_terminate_rport_io, 653 .terminate_rport_io = zfcp_scsi_terminate_rport_io,
667 .show_host_port_state = 1, 654 .show_host_port_state = 1,
668 .bsg_request = zfcp_execute_fc_job, 655 .bsg_request = zfcp_execute_fc_job,
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 0fe5cce818cb..079a8cf518a3 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -88,7 +88,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
88 unsigned long val; \ 88 unsigned long val; \
89 int retval = 0; \ 89 int retval = 0; \
90 \ 90 \
91 down(&zfcp_data.config_sema); \ 91 mutex_lock(&zfcp_data.config_mutex); \
92 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ 92 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \
93 retval = -EBUSY; \ 93 retval = -EBUSY; \
94 goto out; \ 94 goto out; \
@@ -105,7 +105,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
105 _reopen_id, NULL); \ 105 _reopen_id, NULL); \
106 zfcp_erp_wait(_adapter); \ 106 zfcp_erp_wait(_adapter); \
107out: \ 107out: \
108 up(&zfcp_data.config_sema); \ 108 mutex_unlock(&zfcp_data.config_mutex); \
109 return retval ? retval : (ssize_t) count; \ 109 return retval ? retval : (ssize_t) count; \
110} \ 110} \
111static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ 111static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
@@ -126,7 +126,7 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
126 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) 126 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE)
127 return -EBUSY; 127 return -EBUSY;
128 128
129 ret = zfcp_scan_ports(adapter); 129 ret = zfcp_fc_scan_ports(adapter);
130 return ret ? ret : (ssize_t) count; 130 return ret ? ret : (ssize_t) count;
131} 131}
132static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, 132static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
@@ -142,7 +142,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
142 int retval = 0; 142 int retval = 0;
143 LIST_HEAD(port_remove_lh); 143 LIST_HEAD(port_remove_lh);
144 144
145 down(&zfcp_data.config_sema); 145 mutex_lock(&zfcp_data.config_mutex);
146 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { 146 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
147 retval = -EBUSY; 147 retval = -EBUSY;
148 goto out; 148 goto out;
@@ -173,7 +173,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
173 zfcp_port_put(port); 173 zfcp_port_put(port);
174 zfcp_port_dequeue(port); 174 zfcp_port_dequeue(port);
175 out: 175 out:
176 up(&zfcp_data.config_sema); 176 mutex_unlock(&zfcp_data.config_mutex);
177 return retval ? retval : (ssize_t) count; 177 return retval ? retval : (ssize_t) count;
178} 178}
179static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, 179static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
@@ -207,7 +207,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
207 u64 fcp_lun; 207 u64 fcp_lun;
208 int retval = -EINVAL; 208 int retval = -EINVAL;
209 209
210 down(&zfcp_data.config_sema); 210 mutex_lock(&zfcp_data.config_mutex);
211 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { 211 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
212 retval = -EBUSY; 212 retval = -EBUSY;
213 goto out; 213 goto out;
@@ -226,7 +226,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
226 zfcp_erp_wait(unit->port->adapter); 226 zfcp_erp_wait(unit->port->adapter);
227 zfcp_unit_put(unit); 227 zfcp_unit_put(unit);
228out: 228out:
229 up(&zfcp_data.config_sema); 229 mutex_unlock(&zfcp_data.config_mutex);
230 return retval ? retval : (ssize_t) count; 230 return retval ? retval : (ssize_t) count;
231} 231}
232static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); 232static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@@ -241,7 +241,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
241 int retval = 0; 241 int retval = 0;
242 LIST_HEAD(unit_remove_lh); 242 LIST_HEAD(unit_remove_lh);
243 243
244 down(&zfcp_data.config_sema); 244 mutex_lock(&zfcp_data.config_mutex);
245 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { 245 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
246 retval = -EBUSY; 246 retval = -EBUSY;
247 goto out; 247 goto out;
@@ -282,7 +282,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
282 zfcp_unit_put(unit); 282 zfcp_unit_put(unit);
283 zfcp_unit_dequeue(unit); 283 zfcp_unit_dequeue(unit);
284out: 284out:
285 up(&zfcp_data.config_sema); 285 mutex_unlock(&zfcp_data.config_mutex);
286 return retval ? retval : (ssize_t) count; 286 return retval ? retval : (ssize_t) count;
287} 287}
288static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); 288static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
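
The config_sema-to-config_mutex change repeated throughout this file is the standard conversion of a binary semaphore used purely for mutual exclusion; it assumes the declaration in zfcp's shared data was switched in the same series. The pattern (sketch):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(config_mutex);  /* was: a struct semaphore */

    mutex_lock(&config_mutex);          /* was: down(&config_sema) */
    /* ... configuration critical section ... */
    mutex_unlock(&config_mutex);        /* was: up(&config_sema) */

A mutex additionally enforces single-owner release and is covered by lockdep, which a semaphore is not.
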
@@ -425,7 +425,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
425 if (!qtcb_port) 425 if (!qtcb_port)
426 return -ENOMEM; 426 return -ENOMEM;
427 427
428 retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port); 428 retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
429 if (!retval) 429 if (!retval)
430 retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, 430 retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
431 qtcb_port->cb_util, qtcb_port->a_util); 431 qtcb_port->cb_util, qtcb_port->a_util);
@@ -451,7 +451,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev,
451 if (!qtcb_config) 451 if (!qtcb_config)
452 return -ENOMEM; 452 return -ENOMEM;
453 453
454 retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config); 454 retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
455 if (!retval) 455 if (!retval)
456 *stat_inf = qtcb_config->stat_info; 456 *stat_inf = qtcb_config->stat_info;
457 457
@@ -492,15 +492,15 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
492 char *buf) 492 char *buf)
493{ 493{
494 struct Scsi_Host *scsi_host = class_to_shost(dev); 494 struct Scsi_Host *scsi_host = class_to_shost(dev);
495 struct zfcp_adapter *adapter = 495 struct zfcp_qdio *qdio =
496 (struct zfcp_adapter *) scsi_host->hostdata[0]; 496 ((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
497 u64 util; 497 u64 util;
498 498
499 spin_lock_bh(&adapter->qdio_stat_lock); 499 spin_lock_bh(&qdio->stat_lock);
500 util = adapter->req_q_util; 500 util = qdio->req_q_util;
501 spin_unlock_bh(&adapter->qdio_stat_lock); 501 spin_unlock_bh(&qdio->stat_lock);
502 502
503 return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full), 503 return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
504 (unsigned long long)util); 504 (unsigned long long)util);
505} 505}
506static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); 506static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9c23122f755f..82bb3b2d207a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1811,6 +1811,12 @@ config ZFCP
1811 called zfcp. If you want to compile it as a module, say M here 1811 called zfcp. If you want to compile it as a module, say M here
1812 and read <file:Documentation/kbuild/modules.txt>. 1812 and read <file:Documentation/kbuild/modules.txt>.
1813 1813
1814config SCSI_PMCRAID
1815 tristate "PMC SIERRA Linux MaxRAID adapter support"
1816 depends on PCI && SCSI
1817 ---help---
1818 This driver supports the PMC SIERRA MaxRAID adapters.
1819
1814config SCSI_SRP 1820config SCSI_SRP
1815 tristate "SCSI RDMA Protocol helper library" 1821 tristate "SCSI RDMA Protocol helper library"
1816 depends on SCSI && PCI 1822 depends on SCSI && PCI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 25429ea63d0a..61a94af3cee7 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
130obj-$(CONFIG_PS3_ROM) += ps3rom.o 130obj-$(CONFIG_PS3_ROM) += ps3rom.o
131obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ 131obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
132obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ 132obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
133obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
133 134
134obj-$(CONFIG_ARM) += arm/ 135obj-$(CONFIG_ARM) += arm/
135 136
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index ae4b2d588fd3..0c4210d48ee8 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -15,11 +15,10 @@
15 15
16static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); 16static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count; 17static u32 adapter_count;
18static int bnx2i_reg_device;
19 18
20#define DRV_MODULE_NAME "bnx2i" 19#define DRV_MODULE_NAME "bnx2i"
21#define DRV_MODULE_VERSION "2.0.1d" 20#define DRV_MODULE_VERSION "2.0.1e"
22#define DRV_MODULE_RELDATE "Mar 25, 2009" 21#define DRV_MODULE_RELDATE "June 22, 2009"
23 22
24static char version[] __devinitdata = 23static char version[] __devinitdata =
25 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -31,7 +30,7 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
31MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
32MODULE_VERSION(DRV_MODULE_VERSION); 31MODULE_VERSION(DRV_MODULE_VERSION);
33 32
34static DEFINE_RWLOCK(bnx2i_dev_lock); 33static DEFINE_MUTEX(bnx2i_dev_lock);
35 34
36unsigned int event_coal_div = 1; 35unsigned int event_coal_div = 1;
37module_param(event_coal_div, int, 0664); 36module_param(event_coal_div, int, 0664);
@@ -100,14 +99,14 @@ struct bnx2i_hba *get_adapter_list_head(void)
100 if (!adapter_count) 99 if (!adapter_count)
101 goto hba_not_found; 100 goto hba_not_found;
102 101
103 read_lock(&bnx2i_dev_lock); 102 mutex_lock(&bnx2i_dev_lock);
104 list_for_each_entry(tmp_hba, &adapter_list, link) { 103 list_for_each_entry(tmp_hba, &adapter_list, link) {
105 if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) { 104 if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
106 hba = tmp_hba; 105 hba = tmp_hba;
107 break; 106 break;
108 } 107 }
109 } 108 }
110 read_unlock(&bnx2i_dev_lock); 109 mutex_unlock(&bnx2i_dev_lock);
111hba_not_found: 110hba_not_found:
112 return hba; 111 return hba;
113} 112}
@@ -122,14 +121,14 @@ struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
122{ 121{
123 struct bnx2i_hba *hba, *temp; 122 struct bnx2i_hba *hba, *temp;
124 123
125 read_lock(&bnx2i_dev_lock); 124 mutex_lock(&bnx2i_dev_lock);
126 list_for_each_entry_safe(hba, temp, &adapter_list, link) { 125 list_for_each_entry_safe(hba, temp, &adapter_list, link) {
127 if (hba->cnic == cnic) { 126 if (hba->cnic == cnic) {
128 read_unlock(&bnx2i_dev_lock); 127 mutex_unlock(&bnx2i_dev_lock);
129 return hba; 128 return hba;
130 } 129 }
131 } 130 }
132 read_unlock(&bnx2i_dev_lock); 131 mutex_unlock(&bnx2i_dev_lock);
133 return NULL; 132 return NULL;
134} 133}
135 134
@@ -186,18 +185,17 @@ void bnx2i_stop(void *handle)
186 */ 185 */
187void bnx2i_register_device(struct bnx2i_hba *hba) 186void bnx2i_register_device(struct bnx2i_hba *hba)
188{ 187{
188 int rc;
189
189 if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || 190 if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
190 test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 191 test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
191 return; 192 return;
192 } 193 }
193 194
194 hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba); 195 rc = hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
195 196
196 spin_lock(&hba->lock); 197 if (!rc)
197 bnx2i_reg_device++; 198 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
198 spin_unlock(&hba->lock);
199
200 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
201} 199}
202 200
203 201
@@ -211,10 +209,10 @@ void bnx2i_reg_dev_all(void)
211{ 209{
212 struct bnx2i_hba *hba, *temp; 210 struct bnx2i_hba *hba, *temp;
213 211
214 read_lock(&bnx2i_dev_lock); 212 mutex_lock(&bnx2i_dev_lock);
215 list_for_each_entry_safe(hba, temp, &adapter_list, link) 213 list_for_each_entry_safe(hba, temp, &adapter_list, link)
216 bnx2i_register_device(hba); 214 bnx2i_register_device(hba);
217 read_unlock(&bnx2i_dev_lock); 215 mutex_unlock(&bnx2i_dev_lock);
218} 216}
219 217
220 218
@@ -234,10 +232,6 @@ static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
234 232
235 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); 233 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
236 234
237 spin_lock(&hba->lock);
238 bnx2i_reg_device--;
239 spin_unlock(&hba->lock);
240
241 /* ep_disconnect could come before NETDEV_DOWN, driver won't 235 /* ep_disconnect could come before NETDEV_DOWN, driver won't
242 * see NETDEV_DOWN as it already unregistered itself. 236 * see NETDEV_DOWN as it already unregistered itself.
243 */ 237 */
@@ -255,10 +249,10 @@ void bnx2i_unreg_dev_all(void)
255{ 249{
256 struct bnx2i_hba *hba, *temp; 250 struct bnx2i_hba *hba, *temp;
257 251
258 read_lock(&bnx2i_dev_lock); 252 mutex_lock(&bnx2i_dev_lock);
259 list_for_each_entry_safe(hba, temp, &adapter_list, link) 253 list_for_each_entry_safe(hba, temp, &adapter_list, link)
260 bnx2i_unreg_one_device(hba); 254 bnx2i_unreg_one_device(hba);
261 read_unlock(&bnx2i_dev_lock); 255 mutex_unlock(&bnx2i_dev_lock);
262} 256}
263 257
264 258
@@ -267,35 +261,34 @@ void bnx2i_unreg_dev_all(void)
267 * @hba: bnx2i adapter instance 261 * @hba: bnx2i adapter instance
268 * @cnic: cnic device handle 262 * @cnic: cnic device handle
269 * 263 *
270 * Global resource lock and host adapter lock is held during critical sections 264 * Global resource lock is held during critical sections below. This routine is
271 * below. This routine is called from cnic_register_driver() context and 265 * called from either cnic_register_driver() or device hot plug context and
272 * work horse thread which does majority of device specific initialization 266 * does the majority of device-specific initialization
273 */ 267 */
274static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) 268static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
275{ 269{
276 int rc; 270 int rc;
277 271
278 read_lock(&bnx2i_dev_lock); 272 mutex_lock(&bnx2i_dev_lock);
279 if (bnx2i_reg_device && 273 rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
280 !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 274 if (!rc) {
281 rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
282 if (rc) /* duplicate registration */
283 printk(KERN_ERR "bnx2i- dev reg failed\n");
284
285 spin_lock(&hba->lock);
286 bnx2i_reg_device++;
287 hba->age++; 275 hba->age++;
288 spin_unlock(&hba->lock);
289
290 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 276 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
291 } 277 list_add_tail(&hba->link, &adapter_list);
292 read_unlock(&bnx2i_dev_lock); 278 adapter_count++;
293 279 } else if (rc == -EBUSY) /* duplicate registration */
294 write_lock(&bnx2i_dev_lock); 280 printk(KERN_ALERT "bnx2i, duplicate registration "
295 list_add_tail(&hba->link, &adapter_list); 281 "hba=%p, cnic=%p\n", hba, cnic);
296 adapter_count++; 282 else if (rc == -EAGAIN)
297 write_unlock(&bnx2i_dev_lock); 283 printk(KERN_ERR "bnx2i, driver not registered\n");
298 return 0; 284 else if (rc == -EINVAL)
285 printk(KERN_ERR "bnx2i, invalid type %d\n", CNIC_ULP_ISCSI);
286 else
287 printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc);
288
289 mutex_unlock(&bnx2i_dev_lock);
290
291 return rc;
299} 292}
300 293
301 294
@@ -343,19 +336,15 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
343 "found, dev 0x%p\n", dev); 336 "found, dev 0x%p\n", dev);
344 return; 337 return;
345 } 338 }
346 write_lock(&bnx2i_dev_lock); 339 mutex_lock(&bnx2i_dev_lock);
347 list_del_init(&hba->link); 340 list_del_init(&hba->link);
348 adapter_count--; 341 adapter_count--;
349 342
350 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 343 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
351 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); 344 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
352 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 345 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
353
354 spin_lock(&hba->lock);
355 bnx2i_reg_device--;
356 spin_unlock(&hba->lock);
357 } 346 }
358 write_unlock(&bnx2i_dev_lock); 347 mutex_unlock(&bnx2i_dev_lock);
359 348
360 bnx2i_free_hba(hba); 349 bnx2i_free_hba(hba);
361} 350}
@@ -377,6 +366,8 @@ static int __init bnx2i_mod_init(void)
377 if (!is_power_of_2(sq_size)) 366 if (!is_power_of_2(sq_size))
378 sq_size = roundup_pow_of_two(sq_size); 367 sq_size = roundup_pow_of_two(sq_size);
379 368
369 mutex_init(&bnx2i_dev_lock);
370
380 bnx2i_scsi_xport_template = 371 bnx2i_scsi_xport_template =
381 iscsi_register_transport(&bnx2i_iscsi_transport); 372 iscsi_register_transport(&bnx2i_iscsi_transport);
382 if (!bnx2i_scsi_xport_template) { 373 if (!bnx2i_scsi_xport_template) {
@@ -412,7 +403,7 @@ static void __exit bnx2i_mod_exit(void)
412{ 403{
413 struct bnx2i_hba *hba; 404 struct bnx2i_hba *hba;
414 405
415 write_lock(&bnx2i_dev_lock); 406 mutex_lock(&bnx2i_dev_lock);
416 while (!list_empty(&adapter_list)) { 407 while (!list_empty(&adapter_list)) {
417 hba = list_entry(adapter_list.next, struct bnx2i_hba, link); 408 hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
418 list_del(&hba->link); 409 list_del(&hba->link);
@@ -421,14 +412,11 @@ static void __exit bnx2i_mod_exit(void)
421 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { 412 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
422 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); 413 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
423 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); 414 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
424 bnx2i_reg_device--;
425 } 415 }
426 416
427 write_unlock(&bnx2i_dev_lock);
428 bnx2i_free_hba(hba); 417 bnx2i_free_hba(hba);
429 write_lock(&bnx2i_dev_lock);
430 } 418 }
431 write_unlock(&bnx2i_dev_lock); 419 mutex_unlock(&bnx2i_dev_lock);
432 420
433 iscsi_unregister_transport(&bnx2i_iscsi_transport); 421 iscsi_unregister_transport(&bnx2i_iscsi_transport);
434 cnic_unregister_driver(CNIC_ULP_ISCSI); 422 cnic_unregister_driver(CNIC_ULP_ISCSI);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f7412196f2f8..9a7ba71f1af4 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -387,6 +387,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
387 bnx2i_ep = ep->dd_data; 387 bnx2i_ep = ep->dd_data;
388 INIT_LIST_HEAD(&bnx2i_ep->link); 388 INIT_LIST_HEAD(&bnx2i_ep->link);
389 bnx2i_ep->state = EP_STATE_IDLE; 389 bnx2i_ep->state = EP_STATE_IDLE;
390 bnx2i_ep->ep_iscsi_cid = (u16) -1;
390 bnx2i_ep->hba = hba; 391 bnx2i_ep->hba = hba;
391 bnx2i_ep->hba_age = hba->age; 392 bnx2i_ep->hba_age = hba->age;
392 hba->ofld_conns_active++; 393 hba->ofld_conns_active++;
@@ -1160,9 +1161,6 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
1160 struct bnx2i_cmd *cmd = task->dd_data; 1161 struct bnx2i_cmd *cmd = task->dd_data;
1161 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; 1162 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
1162 1163
1163 if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
1164 return -ENOTCONN;
1165
1166 if (!bnx2i_conn->is_bound) 1164 if (!bnx2i_conn->is_bound)
1167 return -ENOTCONN; 1165 return -ENOTCONN;
1168 1166
@@ -1653,15 +1651,18 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1653 struct iscsi_endpoint *ep; 1651 struct iscsi_endpoint *ep;
1654 int rc = 0; 1652 int rc = 0;
1655 1653
1656 if (shost) 1654 if (shost) {
1657 /* driver is given scsi host to work with */ 1655 /* driver is given scsi host to work with */
1658 hba = iscsi_host_priv(shost); 1656 hba = iscsi_host_priv(shost);
1659 else 1657 /* Register the device with cnic if not already done */
1658 bnx2i_register_device(hba);
1659 } else
1660 /* 1660 /*
1661 * check if the given destination can be reached through 1661 * check if the given destination can be reached through
1662 * an iSCSI-capable NetXtreme2 device 1662 * an iSCSI-capable NetXtreme2 device
1663 */ 1663 */
1664 hba = bnx2i_check_route(dst_addr); 1664 hba = bnx2i_check_route(dst_addr);
1665
1665 if (!hba) { 1666 if (!hba) {
1666 rc = -ENOMEM; 1667 rc = -ENOMEM;
1667 goto check_busy; 1668 goto check_busy;
@@ -1681,8 +1682,6 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1681 goto net_if_down; 1682 goto net_if_down;
1682 } 1683 }
1683 1684
1684 bnx2i_ep->state = EP_STATE_IDLE;
1685 bnx2i_ep->ep_iscsi_cid = (u16) -1;
1686 bnx2i_ep->num_active_cmds = 0; 1685 bnx2i_ep->num_active_cmds = 0;
1687 iscsi_cid = bnx2i_alloc_iscsi_cid(hba); 1686 iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1688 if (iscsi_cid == -1) { 1687 if (iscsi_cid == -1) {
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 7b1633a8c15a..fe11c1d4b31d 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -353,6 +353,12 @@ ch_readconfig(scsi_changer *ch)
353 /* look up the devices of the data transfer elements */ 353 /* look up the devices of the data transfer elements */
354 ch->dt = kmalloc(ch->counts[CHET_DT]*sizeof(struct scsi_device), 354 ch->dt = kmalloc(ch->counts[CHET_DT]*sizeof(struct scsi_device),
355 GFP_KERNEL); 355 GFP_KERNEL);
356
357 if (!ch->dt) {
358 kfree(buffer);
359 return -ENOMEM;
360 }
361
356 for (elem = 0; elem < ch->counts[CHET_DT]; elem++) { 362 for (elem = 0; elem < ch->counts[CHET_DT]; elem++) {
357 id = -1; 363 id = -1;
358 lun = 0; 364 lun = 0;
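
The added check closes an allocation-failure path in which the loop below would have dereferenced a NULL ch->dt; note that the earlier buffer allocation must be released on the way out. The general shape (sketch):

    ch->dt = kmalloc(n * sizeof(*ch->dt), GFP_KERNEL);
    if (!ch->dt) {
            kfree(buffer);      /* undo the allocation made earlier */
            return -ENOMEM;
    }
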
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index e79e18101f87..63abb06c4edb 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -4,8 +4,7 @@
4 * Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422) 4 * Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422)
5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) 5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
6 * by D. Gilbert and aeb (20020609) 6 * by D. Gilbert and aeb (20020609)
7 * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025 7 * Update to SPC-4 T10/1713-D Rev 20, 22 May 2009, D. Gilbert 20090624
8 * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702
9 */ 8 */
10 9
11#include <linux/blkdev.h> 10#include <linux/blkdev.h>
@@ -56,9 +55,9 @@ static const char * cdb_byte0_names[] = {
56 "Read Buffer", 55 "Read Buffer",
57/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)", 56/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)",
58/* 40-41 */ "Change Definition", "Write Same(10)", 57/* 40-41 */ "Change Definition", "Write Same(10)",
59/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", 58/* 42-48 */ "Unmap/Read sub-channel", "Read TOC/PMA/ATIP",
60 "Play audio(10)", "Get configuration", "Play audio msf", 59 "Read density support", "Play audio(10)", "Get configuration",
61 "Play audio track/index", 60 "Play audio msf", "Play audio track/index",
62/* 49-4f */ "Play track relative(10)", "Get event status notification", 61/* 49-4f */ "Play track relative(10)", "Get event status notification",
63 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", 62 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
64 NULL, 63 NULL,
@@ -71,12 +70,13 @@ static const char * cdb_byte0_names[] = {
71/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 70/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
72/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 71/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
73/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 72/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
74/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length", 73/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, "Extended CDB",
74 "Variable length",
75/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy", 75/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy",
76 "Receive copy results", 76 "Receive copy results",
77/* 85-89 */ "ATA command pass through(16)", "Access control in", 77/* 85-89 */ "ATA command pass through(16)", "Access control in",
78 "Access control out", "Read(16)", "Memory Export Out(16)", 78 "Access control out", "Read(16)", "Memory Export Out(16)",
79/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes", 79/* 8a-8f */ "Write(16)", "ORWrite", "Read attributes", "Write attributes",
80 "Write and verify(16)", "Verify(16)", 80 "Write and verify(16)", "Verify(16)",
81/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)", 81/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
82 "Lock/unlock cache(16)", "Write same(16)", NULL, 82 "Lock/unlock cache(16)", "Write same(16)", NULL,
@@ -107,22 +107,24 @@ struct value_name_pair {
107}; 107};
108 108
109static const struct value_name_pair maint_in_arr[] = { 109static const struct value_name_pair maint_in_arr[] = {
110 {0x5, "Report device identifier"}, 110 {0x5, "Report identifying information"},
111 {0xa, "Report target port groups"}, 111 {0xa, "Report target port groups"},
112 {0xb, "Report aliases"}, 112 {0xb, "Report aliases"},
113 {0xc, "Report supported operation codes"}, 113 {0xc, "Report supported operation codes"},
114 {0xd, "Report supported task management functions"}, 114 {0xd, "Report supported task management functions"},
115 {0xe, "Report priority"}, 115 {0xe, "Report priority"},
116 {0xf, "Report timestamp"}, 116 {0xf, "Report timestamp"},
117 {0x10, "Management protocol in"},
117}; 118};
118#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) 119#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr)
119 120
120static const struct value_name_pair maint_out_arr[] = { 121static const struct value_name_pair maint_out_arr[] = {
121 {0x6, "Set device identifier"}, 122 {0x6, "Set identifying information"},
122 {0xa, "Set target port groups"}, 123 {0xa, "Set target port groups"},
123 {0xb, "Change aliases"}, 124 {0xb, "Change aliases"},
124 {0xe, "Set priority"}, 125 {0xe, "Set priority"},
125 {0xe, "Set timestamp"}, 126 {0xf, "Set timestamp"},
127 {0x10, "Management protocol out"},
126}; 128};
127#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) 129#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr)
128 130
@@ -412,6 +414,7 @@ static const struct error_info additional[] =
412 {0x0004, "Beginning-of-partition/medium detected"}, 414 {0x0004, "Beginning-of-partition/medium detected"},
413 {0x0005, "End-of-data detected"}, 415 {0x0005, "End-of-data detected"},
414 {0x0006, "I/O process terminated"}, 416 {0x0006, "I/O process terminated"},
417 {0x0007, "Programmable early warning detected"},
415 {0x0011, "Audio play operation in progress"}, 418 {0x0011, "Audio play operation in progress"},
416 {0x0012, "Audio play operation paused"}, 419 {0x0012, "Audio play operation paused"},
417 {0x0013, "Audio play operation successfully completed"}, 420 {0x0013, "Audio play operation successfully completed"},
@@ -425,6 +428,7 @@ static const struct error_info additional[] =
425 {0x001B, "Set capacity operation in progress"}, 428 {0x001B, "Set capacity operation in progress"},
426 {0x001C, "Verify operation in progress"}, 429 {0x001C, "Verify operation in progress"},
427 {0x001D, "ATA pass through information available"}, 430 {0x001D, "ATA pass through information available"},
431 {0x001E, "Conflicting SA creation request"},
428 432
429 {0x0100, "No index/sector signal"}, 433 {0x0100, "No index/sector signal"},
430 434
@@ -449,9 +453,12 @@ static const struct error_info additional[] =
449 {0x040B, "Logical unit not accessible, target port in standby state"}, 453 {0x040B, "Logical unit not accessible, target port in standby state"},
450 {0x040C, "Logical unit not accessible, target port in unavailable " 454 {0x040C, "Logical unit not accessible, target port in unavailable "
451 "state"}, 455 "state"},
456 {0x040D, "Logical unit not ready, structure check required"},
452 {0x0410, "Logical unit not ready, auxiliary memory not accessible"}, 457 {0x0410, "Logical unit not ready, auxiliary memory not accessible"},
453 {0x0411, "Logical unit not ready, notify (enable spinup) required"}, 458 {0x0411, "Logical unit not ready, notify (enable spinup) required"},
454 {0x0412, "Logical unit not ready, offline"}, 459 {0x0412, "Logical unit not ready, offline"},
460 {0x0413, "Logical unit not ready, SA creation in progress"},
461 {0x0414, "Logical unit not ready, space allocation in progress"},
455 462
456 {0x0500, "Logical unit does not respond to selection"}, 463 {0x0500, "Logical unit does not respond to selection"},
457 464
@@ -479,6 +486,9 @@ static const struct error_info additional[] =
479 {0x0B03, "Warning - background self-test failed"}, 486 {0x0B03, "Warning - background self-test failed"},
480 {0x0B04, "Warning - background pre-scan detected medium error"}, 487 {0x0B04, "Warning - background pre-scan detected medium error"},
481 {0x0B05, "Warning - background medium scan detected medium error"}, 488 {0x0B05, "Warning - background medium scan detected medium error"},
489 {0x0B06, "Warning - non-volatile cache now volatile"},
490 {0x0B07, "Warning - degraded power to non-volatile cache"},
491 {0x0B08, "Warning - power loss expected"},
482 492
483 {0x0C00, "Write error"}, 493 {0x0C00, "Write error"},
484 {0x0C01, "Write error - recovered with auto reallocation"}, 494 {0x0C01, "Write error - recovered with auto reallocation"},
@@ -593,6 +603,7 @@ static const struct error_info additional[] =
593 {0x1C02, "Grown defect list not found"}, 603 {0x1C02, "Grown defect list not found"},
594 604
595 {0x1D00, "Miscompare during verify operation"}, 605 {0x1D00, "Miscompare during verify operation"},
606 {0x1D01, "Miscompare verify of unmapped LBA"},
596 607
597 {0x1E00, "Recovered id with ECC correction"}, 608 {0x1E00, "Recovered id with ECC correction"},
598 609
@@ -626,6 +637,7 @@ static const struct error_info additional[] =
626 {0x2405, "Security working key frozen"}, 637 {0x2405, "Security working key frozen"},
627 {0x2406, "Nonce not unique"}, 638 {0x2406, "Nonce not unique"},
628 {0x2407, "Nonce timestamp out of range"}, 639 {0x2407, "Nonce timestamp out of range"},
640 {0x2408, "Invalid XCDB"},
629 641
630 {0x2500, "Logical unit not supported"}, 642 {0x2500, "Logical unit not supported"},
631 643
@@ -656,10 +668,12 @@ static const struct error_info additional[] =
656 {0x2704, "Persistent write protect"}, 668 {0x2704, "Persistent write protect"},
657 {0x2705, "Permanent write protect"}, 669 {0x2705, "Permanent write protect"},
658 {0x2706, "Conditional write protect"}, 670 {0x2706, "Conditional write protect"},
671 {0x2707, "Space allocation failed write protect"},
659 672
660 {0x2800, "Not ready to ready change, medium may have changed"}, 673 {0x2800, "Not ready to ready change, medium may have changed"},
661 {0x2801, "Import or export element accessed"}, 674 {0x2801, "Import or export element accessed"},
662 {0x2802, "Format-layer may have changed"}, 675 {0x2802, "Format-layer may have changed"},
676 {0x2803, "Import/export element accessed, medium changed"},
663 677
664 {0x2900, "Power on, reset, or bus device reset occurred"}, 678 {0x2900, "Power on, reset, or bus device reset occurred"},
665 {0x2901, "Power on occurred"}, 679 {0x2901, "Power on occurred"},
@@ -680,11 +694,16 @@ static const struct error_info additional[] =
 	{0x2A07, "Implicit asymmetric access state transition failed"},
 	{0x2A08, "Priority changed"},
 	{0x2A09, "Capacity data has changed"},
+	{0x2A0A, "Error history I_T nexus cleared"},
+	{0x2A0B, "Error history snapshot released"},
+	{0x2A0C, "Error recovery attributes have changed"},
+	{0x2A0D, "Data encryption capabilities changed"},
 	{0x2A10, "Timestamp changed"},
 	{0x2A11, "Data encryption parameters changed by another i_t nexus"},
 	{0x2A12, "Data encryption parameters changed by vendor specific "
 		"event"},
 	{0x2A13, "Data encryption key instance counter has changed"},
+	{0x2A14, "SA creation capabilities data has changed"},
 
 	{0x2B00, "Copy cannot execute since host cannot disconnect"},
 
@@ -723,6 +742,8 @@ static const struct error_info additional[] =
 	{0x300C, "WORM medium - overwrite attempted"},
 	{0x300D, "WORM medium - integrity check"},
 	{0x3010, "Medium not formatted"},
+	{0x3011, "Incompatible volume type"},
+	{0x3012, "Incompatible volume qualifier"},
 
 	{0x3100, "Medium format corrupted"},
 	{0x3101, "Format command failed"},
@@ -782,6 +803,10 @@ static const struct error_info additional[] =
 	{0x3B15, "Medium magazine unlocked"},
 	{0x3B16, "Mechanical positioning or changer error"},
 	{0x3B17, "Read past end of user object"},
+	{0x3B18, "Element disabled"},
+	{0x3B19, "Element enabled"},
+	{0x3B1A, "Data transfer device removed"},
+	{0x3B1B, "Data transfer device inserted"},
 
 	{0x3D00, "Invalid bits in identify message"},
 
@@ -882,6 +907,8 @@ static const struct error_info additional[] =
 	{0x5506, "Auxiliary memory out of space"},
 	{0x5507, "Quota error"},
 	{0x5508, "Maximum number of supplemental decryption keys exceeded"},
+	{0x5509, "Medium auxiliary memory not accessible"},
+	{0x550A, "Data currently unavailable"},
 
 	{0x5700, "Unable to recover table-of-contents"},
 
@@ -993,6 +1020,12 @@ static const struct error_info additional[] =
 	{0x5E02, "Standby condition activated by timer"},
 	{0x5E03, "Idle condition activated by command"},
 	{0x5E04, "Standby condition activated by command"},
+	{0x5E05, "Idle_b condition activated by timer"},
+	{0x5E06, "Idle_b condition activated by command"},
+	{0x5E07, "Idle_c condition activated by timer"},
+	{0x5E08, "Idle_c condition activated by command"},
+	{0x5E09, "Standby_y condition activated by timer"},
+	{0x5E0A, "Standby_y condition activated by command"},
 	{0x5E41, "Power state change to active"},
 	{0x5E42, "Power state change to idle"},
 	{0x5E43, "Power state change to standby"},
@@ -1091,7 +1124,28 @@ static const struct error_info additional[] =
 	{0x7403, "Incorrect data encryption key"},
 	{0x7404, "Cryptographic integrity validation failed"},
 	{0x7405, "Error decrypting data"},
+	{0x7406, "Unknown signature verification key"},
+	{0x7407, "Encryption parameters not useable"},
+	{0x7408, "Digital signature validation failure"},
+	{0x7409, "Encryption mode mismatch on read"},
+	{0x740A, "Encrypted block not raw read enabled"},
+	{0x740B, "Incorrect Encryption parameters"},
+	{0x740C, "Unable to decrypt parameter list"},
+	{0x740D, "Encryption algorithm disabled"},
+	{0x7410, "SA creation parameter value invalid"},
+	{0x7411, "SA creation parameter value rejected"},
+	{0x7412, "Invalid SA usage"},
+	{0x7421, "Data Encryption configuration prevented"},
+	{0x7430, "SA creation parameter not supported"},
+	{0x7440, "Authentication failed"},
+	{0x7461, "External data encryption key manager access error"},
+	{0x7462, "External data encryption key manager error"},
+	{0x7463, "External data encryption key not found"},
+	{0x7464, "External data encryption request not authorized"},
+	{0x746E, "External data encryption control timeout"},
+	{0x746F, "External data encryption control error"},
 	{0x7471, "Logical unit access not authorized"},
+	{0x7479, "Security conflict in translated device"},
 
 	{0, NULL}
 };
@@ -1103,12 +1157,12 @@ struct error_info2 {
 
 static const struct error_info2 additional2[] =
 {
-	{0x40,0x00,0x7f,"Ram failure (%x)"},
-	{0x40,0x80,0xff,"Diagnostic failure on component (%x)"},
-	{0x41,0x00,0xff,"Data path failure (%x)"},
-	{0x42,0x00,0xff,"Power-on or self-test failure (%x)"},
-	{0x4D,0x00,0xff,"Tagged overlapped commands (queue tag %x)"},
-	{0x70,0x00,0xff,"Decompression exception short algorithm id of %x"},
+	{0x40, 0x00, 0x7f, "Ram failure (%x)"},
+	{0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"},
+	{0x41, 0x00, 0xff, "Data path failure (%x)"},
+	{0x42, 0x00, 0xff, "Power-on or self-test failure (%x)"},
+	{0x4D, 0x00, 0xff, "Tagged overlapped commands (task tag %x)"},
+	{0x70, 0x00, 0xff, "Decompression exception short algorithm id of %x"},
 	{0, 0, 0, NULL}
 };
 
@@ -1157,14 +1211,15 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq) {
 	int i;
 	unsigned short code = ((asc << 8) | ascq);
 
-	for (i=0; additional[i].text; i++)
+	for (i = 0; additional[i].text; i++)
 		if (additional[i].code12 == code)
 			return additional[i].text;
-	for (i=0; additional2[i].fmt; i++)
+	for (i = 0; additional2[i].fmt; i++) {
 		if (additional2[i].code1 == asc &&
-		    additional2[i].code2_min >= ascq &&
-		    additional2[i].code2_max <= ascq)
+		    ascq >= additional2[i].code2_min &&
+		    ascq <= additional2[i].code2_max)
 			return additional2[i].fmt;
+	}
 #endif
 	return NULL;
 }
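The rewritten comparison above is now a true inclusive range check; the old form could only match when ascq equaled both bounds. A minimal standalone userspace sketch of the corrected lookup, with a hypothetical one-entry table mirroring the additional2[] layout:

	#include <stdio.h>

	struct error_info2 {
		unsigned char code1, code2_min, code2_max;
		const char *fmt;
	};

	/* Hypothetical table entry, same shape as the kernel's additional2[]. */
	static const struct error_info2 tbl[] = {
		{0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"},
		{0, 0, 0, NULL}
	};

	static const char *lookup(unsigned char asc, unsigned char ascq)
	{
		int i;

		for (i = 0; tbl[i].fmt; i++) {
			if (tbl[i].code1 == asc &&
			    ascq >= tbl[i].code2_min &&	/* inclusive lower bound */
			    ascq <= tbl[i].code2_max)	/* inclusive upper bound */
				return tbl[i].fmt;
		}
		return NULL;
	}

	int main(void)
	{
		/* 0x40/0x9A lies inside [0x80, 0xff]; the pre-fix test missed it. */
		printf(lookup(0x40, 0x9a), 0x9a);
		printf("\n");
		return 0;
	}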
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index a518f2eff19a..3ee1cbc89479 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -153,12 +153,24 @@ static int scsi_dh_handler_attach(struct scsi_device *sdev,
 	if (sdev->scsi_dh_data) {
 		if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
 			err = -EBUSY;
-	} else if (scsi_dh->attach)
+		else
+			kref_get(&sdev->scsi_dh_data->kref);
+	} else if (scsi_dh->attach) {
 		err = scsi_dh->attach(sdev);
-
+		if (!err) {
+			kref_init(&sdev->scsi_dh_data->kref);
+			sdev->scsi_dh_data->sdev = sdev;
+		}
+	}
 	return err;
 }
 
+static void __detach_handler (struct kref *kref)
+{
+	struct scsi_dh_data *scsi_dh_data = container_of(kref, struct scsi_dh_data, kref);
+	scsi_dh_data->scsi_dh->detach(scsi_dh_data->sdev);
+}
+
 /*
  * scsi_dh_handler_detach - Detach a device handler from a device
  * @sdev - SCSI device the device handler should be detached from
@@ -180,7 +192,7 @@ static void scsi_dh_handler_detach(struct scsi_device *sdev,
 	scsi_dh = sdev->scsi_dh_data->scsi_dh;
 
 	if (scsi_dh && scsi_dh->detach)
-		scsi_dh->detach(sdev);
+		kref_put(&sdev->scsi_dh_data->kref, __detach_handler);
 }
 
 /*
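The attach/detach pair above becomes reference-counted: the first attach does kref_init, repeat attaches of the same handler do kref_get, and the handler's detach callback only runs when the last user drops its reference. A minimal single-threaded sketch of that lifecycle (the real struct kref is atomic; this toy counter is not):

	#include <stdio.h>

	struct kref { int refcount; };

	static void kref_init(struct kref *k) { k->refcount = 1; }
	static void kref_get(struct kref *k)  { k->refcount++; }

	static void kref_put(struct kref *k, void (*release)(struct kref *))
	{
		if (--k->refcount == 0)
			release(k);
	}

	static void detach(struct kref *k)
	{
		printf("last user gone: handler detached\n");
	}

	int main(void)
	{
		struct kref handler_ref;

		kref_init(&handler_ref);	/* first attach */
		kref_get(&handler_ref);		/* second attach, same handler */
		kref_put(&handler_ref, detach);	/* refcount 2 -> 1, no detach yet */
		kref_put(&handler_ref, detach);	/* refcount 1 -> 0, detach runs */
		return 0;
	}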
@@ -440,6 +452,39 @@ int scsi_dh_activate(struct request_queue *q)
 EXPORT_SYMBOL_GPL(scsi_dh_activate);
 
 /*
+ * scsi_dh_set_params - set the parameters for the device as per the
+ *      string specified in params.
+ * @q - Request queue that is associated with the scsi_device for
+ *      which the parameters to be set.
+ * @params - parameters in the following format
+ *      "no_of_params\0param1\0param2\0param3\0...\0"
+ *      for example, string for 2 parameters with value 10 and 21
+ *      is specified as "2\010\021\0".
+ */
+int scsi_dh_set_params(struct request_queue *q, const char *params)
+{
+	int err = -SCSI_DH_NOSYS;
+	unsigned long flags;
+	struct scsi_device *sdev;
+	struct scsi_device_handler *scsi_dh = NULL;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	sdev = q->queuedata;
+	if (sdev && sdev->scsi_dh_data)
+		scsi_dh = sdev->scsi_dh_data->scsi_dh;
+	if (scsi_dh && scsi_dh->set_params && get_device(&sdev->sdev_gendev))
+		err = 0;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	if (err)
+		return err;
+	err = scsi_dh->set_params(sdev, params);
+	put_device(&sdev->sdev_gendev);
+	return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_set_params);
+
+/*
  * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
  *	the given name. FALSE(0) otherwise.
  * @name - name of the device handler.
@@ -474,7 +519,6 @@ int scsi_dh_attach(struct request_queue *q, const char *name)
 
 	if (!err) {
 		err = scsi_dh_handler_attach(sdev, scsi_dh);
-
 		put_device(&sdev->sdev_gendev);
 	}
 	return err;
@@ -505,10 +549,8 @@ void scsi_dh_detach(struct request_queue *q)
 		return;
 
 	if (sdev->scsi_dh_data) {
-		/* if sdev is not on internal list, detach */
 		scsi_dh = sdev->scsi_dh_data->scsi_dh;
-		if (!device_handler_match(scsi_dh, sdev))
-			scsi_dh_handler_detach(sdev, scsi_dh);
+		scsi_dh_handler_detach(sdev, scsi_dh);
 	}
 	put_device(&sdev->sdev_gendev);
 }
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index dba154c8ff64..b5cdefaf2608 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -663,7 +663,7 @@ static int alua_activate(struct scsi_device *sdev)
 		goto out;
 	}
 
-	if (h->tpgs == TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
+	if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
 		err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h);
 
 out:
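The one-character change above matters because the TPGS mode word can carry both the implicit and the explicit bits at once; the equality test then never fires and the explicit STPG is skipped. A tiny sketch of the difference (bit values chosen for illustration, not taken from the driver headers):

	#include <stdio.h>

	#define TPGS_MODE_IMPLICIT	0x1	/* illustrative bit assignments */
	#define TPGS_MODE_EXPLICIT	0x2

	int main(void)
	{
		int tpgs = TPGS_MODE_IMPLICIT | TPGS_MODE_EXPLICIT;

		/* old test: false for a combined mode, explicit STPG skipped */
		printf("== test: %d\n", tpgs == TPGS_MODE_EXPLICIT);
		/* new test: true whenever the explicit bit is present */
		printf("&  test: %d\n", !!(tpgs & TPGS_MODE_EXPLICIT));
		return 0;
	}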
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 0e572d2c5b0a..0cffe84976fe 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -561,6 +561,61 @@ done:
 
 	return result;
 }
+/*
+ * params - parameters in the following format
+ *      "no_of_params\0param1\0param2\0param3\0...\0"
+ *      for example, string for 2 parameters with value 10 and 21
+ *      is specified as "2\010\021\0".
+ */
+static int clariion_set_params(struct scsi_device *sdev, const char *params)
+{
+	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+	unsigned int hr = 0, st = 0, argc;
+	const char *p = params;
+	int result = SCSI_DH_OK;
+
+	if ((sscanf(params, "%u", &argc) != 1) || (argc != 2))
+		return -EINVAL;
+
+	while (*p++)
+		;
+	if ((sscanf(p, "%u", &st) != 1) || (st > 1))
+		return -EINVAL;
+
+	while (*p++)
+		;
+	if ((sscanf(p, "%u", &hr) != 1) || (hr > 1))
+		return -EINVAL;
+
+	if (st)
+		csdev->flags |= CLARIION_SHORT_TRESPASS;
+	else
+		csdev->flags &= ~CLARIION_SHORT_TRESPASS;
+
+	if (hr)
+		csdev->flags |= CLARIION_HONOR_RESERVATIONS;
+	else
+		csdev->flags &= ~CLARIION_HONOR_RESERVATIONS;
+
+	/*
+	 * If this path is owned, we have to send a trespass command
+	 * with the new parameters. If not, simply return. Next trespass
+	 * command would use the parameters.
+	 */
+	if (csdev->lun_state != CLARIION_LUN_OWNED)
+		goto done;
+
+	csdev->lun_state = CLARIION_LUN_UNINITIALIZED;
+	result = send_trespass_cmd(sdev, csdev);
+	if (result != SCSI_DH_OK)
+		goto done;
+
+	/* Update status */
+	result = clariion_send_inquiry(sdev, csdev);
+
+done:
+	return result;
+}
 
 static const struct scsi_dh_devlist clariion_dev_list[] = {
 	{"DGC", "RAID"},
@@ -581,11 +636,9 @@ static struct scsi_device_handler clariion_dh = {
 	.check_sense	= clariion_check_sense,
 	.activate	= clariion_activate,
 	.prep_fn	= clariion_prep_fn,
+	.set_params	= clariion_set_params,
 };
 
-/*
- * TODO: need some interface so we can set trespass values
- */
 static int clariion_bus_attach(struct scsi_device *sdev)
 {
 	struct scsi_dh_data *scsi_dh_data;
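Both set_params implementations parse the same "no_of_params\0param1\0param2\0...\0" convention: NUL-separated ASCII tokens, not C escape sequences (the doc comment's "2\010\021\0" is meant byte-for-byte, and would be mangled by octal escapes if pasted into a C string literal). A hedged userspace sketch of building and walking such a buffer:

	#include <stdio.h>

	int main(void)
	{
		/*
		 * Two parameters, 10 and 21, spelled out as explicit bytes to
		 * sidestep the octal-escape trap a "2\010\021\0" literal would hit.
		 */
		const char params[] = { '2', '\0', '1', '0', '\0', '2', '1', '\0' };
		const char *p = params;
		unsigned int argc, v1, v2;

		sscanf(p, "%u", &argc);	/* token 1: parameter count */
		while (*p++)
			;		/* hop over the NUL terminator */
		sscanf(p, "%u", &v1);	/* token 2: first parameter */
		while (*p++)
			;
		sscanf(p, "%u", &v2);	/* token 3: second parameter */

		printf("argc=%u v1=%u v2=%u\n", argc, v1, v2);	/* 2, 10, 21 */
		return 0;
	}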
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index fd0544f7da81..11c89311427e 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -112,6 +112,7 @@ struct c9_inquiry {
 
 #define SUBSYS_ID_LEN	16
 #define SLOT_ID_LEN	2
+#define ARRAY_LABEL_LEN	31
 
 struct c4_inquiry {
 	u8 peripheral_info;
@@ -135,6 +136,8 @@ struct rdac_controller {
 		struct rdac_pg_legacy legacy;
 		struct rdac_pg_expanded expanded;
 	} mode_select;
+	u8	index;
+	u8	array_name[ARRAY_LABEL_LEN];
 };
 struct c8_inquiry {
 	u8 peripheral_info;
@@ -198,6 +201,31 @@ static const char *lun_state[] =
 static LIST_HEAD(ctlr_list);
 static DEFINE_SPINLOCK(list_lock);
 
+/*
+ * module parameter to enable rdac debug logging.
+ * 2 bits for each type of logging, only two types defined for now
+ * Can be enhanced if required at later point
+ */
+static int rdac_logging = 1;
+module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
+		"Default is 1 - failover logging enabled, "
+		"set it to 0xF to enable all the logs");
+
+#define RDAC_LOG_FAILOVER	0
+#define RDAC_LOG_SENSE		2
+
+#define RDAC_LOG_BITS		2
+
+#define RDAC_LOG_LEVEL(SHIFT)  \
+	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))
+
+#define RDAC_LOG(SHIFT, sdev, f, arg...) \
+do { \
+	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
+		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
+} while (0);
+
 static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
 {
 	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
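Each log type owns a 2-bit level field inside rdac_logging, so shifting by the type's offset and masking with (1 << RDAC_LOG_BITS) - 1 extracts that type's level. The same arithmetic as a standalone sketch:

	#include <stdio.h>

	#define LOG_FAILOVER	0	/* bit offsets, as in the patch */
	#define LOG_SENSE	2
	#define LOG_BITS	2

	static int logging = 0x5;	/* 0101b: failover level 1, sense level 1 */

	static int log_level(int shift)
	{
		return (logging >> shift) & ((1 << LOG_BITS) - 1);
	}

	int main(void)
	{
		/* default 0x1 gives failover=1 sense=0; 0xF gives both 3 */
		printf("failover=%d sense=%d\n",
		       log_level(LOG_FAILOVER), log_level(LOG_SENSE));
		return 0;
	}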
@@ -303,7 +331,8 @@ static void release_controller(struct kref *kref)
 	kfree(ctlr);
 }
 
-static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
+static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
+			char *array_name)
 {
 	struct rdac_controller *ctlr, *tmp;
 
@@ -324,6 +353,14 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
 	/* initialize fields of controller */
 	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
 	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
+	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
+
+	/* update the controller index */
+	if (slot_id[1] == 0x31)
+		ctlr->index = 0;
+	else
+		ctlr->index = 1;
+
 	kref_init(&ctlr->kref);
 	ctlr->use_ms10 = -1;
 	list_add(&ctlr->node, &ctlr_list);
@@ -363,9 +400,10 @@ done:
 	return err;
 }
 
-static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
+static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
+			char *array_name)
 {
-	int err;
+	int err, i;
 	struct c8_inquiry *inqp;
 
 	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
@@ -377,6 +415,11 @@ static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
 		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
 			return SCSI_DH_NOSYS;
 		h->lun = inqp->lun[7]; /* Uses only the last byte */
+
+		for(i=0; i<ARRAY_LABEL_LEN-1; ++i)
+			*(array_name+i) = inqp->array_user_label[(2*i)+1];
+
+		*(array_name+ARRAY_LABEL_LEN-1) = '\0';
 	}
 	return err;
 }
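The loop above keeps every second byte of the inquiry's array_user_label and NUL-terminates the result, which only makes sense if the firmware stores the label as two bytes per character with the printable byte in the odd position (an assumption inferred from the indexing, not spelled out in the patch). A small sketch of the same extraction:

	#include <stdio.h>

	int main(void)
	{
		/* toy stand-in for inqp->array_user_label, 2 bytes per char */
		unsigned char label[8] = { 0, 'S', 0, 'T', 0, '2', 0, '5' };
		char name[5];
		int i;

		for (i = 0; i < 4; i++)
			name[i] = label[(2 * i) + 1];	/* keep the odd bytes */
		name[4] = '\0';

		printf("array name: %s\n", name);	/* prints "ST25" */
		return 0;
	}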
@@ -410,7 +453,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
 }
 
 static int initialize_controller(struct scsi_device *sdev,
-			struct rdac_dh_data *h)
+			struct rdac_dh_data *h, char *array_name)
 {
 	int err;
 	struct c4_inquiry *inqp;
@@ -418,7 +461,8 @@ static int initialize_controller(struct scsi_device *sdev,
 	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
 	if (err == SCSI_DH_OK) {
 		inqp = &h->inq.c4;
-		h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
+		h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id,
+					array_name);
 		if (!h->ctlr)
 			err = SCSI_DH_RES_TEMP_UNAVAIL;
 	}
@@ -450,6 +494,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
 {
 	struct scsi_sense_hdr sense_hdr;
 	int err = SCSI_DH_IO, ret;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
 
 	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
 	if (!ret)
@@ -478,11 +523,14 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
 		err = SCSI_DH_RETRY;
 		break;
 	default:
-		sdev_printk(KERN_INFO, sdev,
-			    "MODE_SELECT failed with sense %02x/%02x/%02x.\n",
-			    sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
+		break;
 	}
 
+	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+		"MODE_SELECT returned with sense %02x/%02x/%02x",
+		(char *) h->ctlr->array_name, h->ctlr->index,
+		sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
+
 done:
 	return err;
 }
@@ -499,7 +547,9 @@ retry:
 	if (!rq)
 		goto done;
 
-	sdev_printk(KERN_INFO, sdev, "%s MODE_SELECT command.\n",
+	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+		"%s MODE_SELECT command",
+		(char *) h->ctlr->array_name, h->ctlr->index,
 		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
 
 	err = blk_execute_rq(q, NULL, rq, 1);
@@ -509,8 +559,12 @@ retry:
 		if (err == SCSI_DH_RETRY && retry_cnt--)
 			goto retry;
 	}
-	if (err == SCSI_DH_OK)
+	if (err == SCSI_DH_OK) {
 		h->state = RDAC_STATE_ACTIVE;
+		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+			"MODE_SELECT completed",
+			(char *) h->ctlr->array_name, h->ctlr->index);
+	}
 
 done:
 	return err;
@@ -525,17 +579,6 @@ static int rdac_activate(struct scsi_device *sdev)
 	if (err != SCSI_DH_OK)
 		goto done;
 
-	if (!h->ctlr) {
-		err = initialize_controller(sdev, h);
-		if (err != SCSI_DH_OK)
-			goto done;
-	}
-
-	if (h->ctlr->use_ms10 == -1) {
-		err = set_mode_select(sdev, h);
-		if (err != SCSI_DH_OK)
-			goto done;
-	}
 	if (h->lun_state == RDAC_LUN_UNOWNED)
 		err = send_mode_select(sdev, h);
 done:
@@ -559,6 +602,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
 			  struct scsi_sense_hdr *sense_hdr)
 {
 	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
+		"I/O returned with sense %02x/%02x/%02x",
+		(char *) h->ctlr->array_name, h->ctlr->index,
+		sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
+
 	switch (sense_hdr->sense_key) {
 	case NOT_READY:
 		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
@@ -628,11 +677,18 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
628 {"SGI", "IS"}, 677 {"SGI", "IS"},
629 {"STK", "OPENstorage D280"}, 678 {"STK", "OPENstorage D280"},
630 {"SUN", "CSM200_R"}, 679 {"SUN", "CSM200_R"},
680 {"SUN", "LCSM100_I"},
681 {"SUN", "LCSM100_S"},
682 {"SUN", "LCSM100_E"},
631 {"SUN", "LCSM100_F"}, 683 {"SUN", "LCSM100_F"},
632 {"DELL", "MD3000"}, 684 {"DELL", "MD3000"},
633 {"DELL", "MD3000i"}, 685 {"DELL", "MD3000i"},
686 {"DELL", "MD32xx"},
687 {"DELL", "MD32xxi"},
634 {"LSI", "INF-01-00"}, 688 {"LSI", "INF-01-00"},
635 {"ENGENIO", "INF-01-00"}, 689 {"ENGENIO", "INF-01-00"},
690 {"STK", "FLEXLINE 380"},
691 {"SUN", "CSM100_R_FC"},
636 {NULL, NULL}, 692 {NULL, NULL},
637}; 693};
638 694
@@ -656,6 +712,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
 	struct rdac_dh_data *h;
 	unsigned long flags;
 	int err;
+	char array_name[ARRAY_LABEL_LEN];
 
 	scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
 			       + sizeof(*h) , GFP_KERNEL);
@@ -670,16 +727,24 @@ static int rdac_bus_attach(struct scsi_device *sdev)
 	h->lun = UNINITIALIZED_LUN;
 	h->state = RDAC_STATE_ACTIVE;
 
-	err = get_lun(sdev, h);
+	err = get_lun_info(sdev, h, array_name);
 	if (err != SCSI_DH_OK)
 		goto failed;
 
-	err = check_ownership(sdev, h);
+	err = initialize_controller(sdev, h, array_name);
 	if (err != SCSI_DH_OK)
 		goto failed;
 
+	err = check_ownership(sdev, h);
+	if (err != SCSI_DH_OK)
+		goto clean_ctlr;
+
+	err = set_mode_select(sdev, h);
+	if (err != SCSI_DH_OK)
+		goto clean_ctlr;
+
 	if (!try_module_get(THIS_MODULE))
-		goto failed;
+		goto clean_ctlr;
 
 	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
 	sdev->scsi_dh_data = scsi_dh_data;
@@ -691,6 +756,9 @@ static int rdac_bus_attach(struct scsi_device *sdev)
 
 	return 0;
 
+clean_ctlr:
+	kref_put(&h->ctlr->kref, release_controller);
+
 failed:
 	kfree(scsi_dh_data);
 	sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 0a5609bb5817..704b8e034946 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -49,9 +49,20 @@ MODULE_AUTHOR("Open-FCoE.org");
 MODULE_DESCRIPTION("FCoE");
 MODULE_LICENSE("GPL v2");
 
+/* Performance tuning parameters for fcoe */
+static unsigned int fcoe_ddp_min;
+module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
+		 "Direct Data Placement (DDP).");
+
+DEFINE_MUTEX(fcoe_config_mutex);
+
+/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
+static DECLARE_COMPLETION(fcoe_flush_completion);
+
 /* fcoe host list */
+/* must only by accessed under the RTNL mutex */
 LIST_HEAD(fcoe_hostlist);
-DEFINE_RWLOCK(fcoe_hostlist_lock);
 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
 
 /* Function Prototypes */
@@ -66,12 +77,13 @@ static int fcoe_link_ok(struct fc_lport *lp);
 
 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
 static int fcoe_hostlist_add(const struct fc_lport *);
-static int fcoe_hostlist_remove(const struct fc_lport *);
 
 static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
 static void fcoe_dev_setup(void);
 static void fcoe_dev_cleanup(void);
+static struct fcoe_interface *
+	fcoe_hostlist_lookup_port(const struct net_device *dev);
 
 /* notification function from net device */
 static struct notifier_block fcoe_notifier = {
@@ -132,6 +144,180 @@ static struct scsi_host_template fcoe_shost_template = {
 	.max_sectors = 0xffff,
 };
 
+static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
+			 struct packet_type *ptype,
+			 struct net_device *orig_dev);
+/**
+ * fcoe_interface_setup()
+ * @fcoe: new fcoe_interface
+ * @netdev : ptr to the associated netdevice struct
+ *
+ * Returns : 0 for success
+ * Locking: must be called with the RTNL mutex held
+ */
+static int fcoe_interface_setup(struct fcoe_interface *fcoe,
+				struct net_device *netdev)
+{
+	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct netdev_hw_addr *ha;
+	u8 flogi_maddr[ETH_ALEN];
+
+	fcoe->netdev = netdev;
+
+	/* Do not support for bonding device */
+	if ((netdev->priv_flags & IFF_MASTER_ALB) ||
+	    (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
+	    (netdev->priv_flags & IFF_MASTER_8023AD)) {
+		return -EOPNOTSUPP;
+	}
+
+	/* look for SAN MAC address, if multiple SAN MACs exist, only
+	 * use the first one for SPMA */
+	rcu_read_lock();
+	for_each_dev_addr(netdev, ha) {
+		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
+		    (is_valid_ether_addr(fip->ctl_src_addr))) {
+			memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
+			fip->spma = 1;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	/* setup Source Mac Address */
+	if (!fip->spma)
+		memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
+
+	/*
+	 * Add FCoE MAC address as second unicast MAC address
+	 * or enter promiscuous mode if not capable of listening
+	 * for multiple unicast MACs.
+	 */
+	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+	dev_unicast_add(netdev, flogi_maddr);
+	if (fip->spma)
+		dev_unicast_add(netdev, fip->ctl_src_addr);
+	dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+
+	/*
+	 * setup the receive function from ethernet driver
+	 * on the ethertype for the given device
+	 */
+	fcoe->fcoe_packet_type.func = fcoe_rcv;
+	fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+	fcoe->fcoe_packet_type.dev = netdev;
+	dev_add_pack(&fcoe->fcoe_packet_type);
+
+	fcoe->fip_packet_type.func = fcoe_fip_recv;
+	fcoe->fip_packet_type.type = htons(ETH_P_FIP);
+	fcoe->fip_packet_type.dev = netdev;
+	dev_add_pack(&fcoe->fip_packet_type);
+
+	return 0;
+}
+
+static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
+static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new);
+static void fcoe_destroy_work(struct work_struct *work);
+
+/**
+ * fcoe_interface_create()
+ * @netdev: network interface
+ *
+ * Returns: pointer to a struct fcoe_interface or NULL on error
+ */
+static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
+{
+	struct fcoe_interface *fcoe;
+
+	fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
+	if (!fcoe) {
+		FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
+		return NULL;
+	}
+
+	dev_hold(netdev);
+	kref_init(&fcoe->kref);
+
+	/*
+	 * Initialize FIP.
+	 */
+	fcoe_ctlr_init(&fcoe->ctlr);
+	fcoe->ctlr.send = fcoe_fip_send;
+	fcoe->ctlr.update_mac = fcoe_update_src_mac;
+
+	fcoe_interface_setup(fcoe, netdev);
+
+	return fcoe;
+}
+
+/**
+ * fcoe_interface_cleanup() - clean up netdev configurations
+ * @fcoe:
+ *
+ * Caller must be holding the RTNL mutex
+ */
+void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+{
+	struct net_device *netdev = fcoe->netdev;
+	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	u8 flogi_maddr[ETH_ALEN];
+
+	/*
+	 * Don't listen for Ethernet packets anymore.
+	 * synchronize_net() ensures that the packet handlers are not running
+	 * on another CPU. dev_remove_pack() would do that, this calls the
+	 * unsyncronized version __dev_remove_pack() to avoid multiple delays.
+	 */
+	__dev_remove_pack(&fcoe->fcoe_packet_type);
+	__dev_remove_pack(&fcoe->fip_packet_type);
+	synchronize_net();
+
+	/* Delete secondary MAC addresses */
+	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+	dev_unicast_delete(netdev, flogi_maddr);
+	if (!is_zero_ether_addr(fip->data_src_addr))
+		dev_unicast_delete(netdev, fip->data_src_addr);
+	if (fip->spma)
+		dev_unicast_delete(netdev, fip->ctl_src_addr);
+	dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+}
+
+/**
+ * fcoe_interface_release() - fcoe_port kref release function
+ * @kref: embedded reference count in an fcoe_interface struct
+ */
+static void fcoe_interface_release(struct kref *kref)
+{
+	struct fcoe_interface *fcoe;
+	struct net_device *netdev;
+
+	fcoe = container_of(kref, struct fcoe_interface, kref);
+	netdev = fcoe->netdev;
+	/* tear-down the FCoE controller */
+	fcoe_ctlr_destroy(&fcoe->ctlr);
+	kfree(fcoe);
+	dev_put(netdev);
+}
+
+/**
+ * fcoe_interface_get()
+ * @fcoe:
+ */
+static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
+{
+	kref_get(&fcoe->kref);
+}
+
+/**
+ * fcoe_interface_put()
+ * @fcoe:
+ */
+static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
+{
+	kref_put(&fcoe->kref, fcoe_interface_release);
+}
+
 /**
  * fcoe_fip_recv - handle a received FIP frame.
  * @skb: the receive skb
@@ -145,10 +331,10 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
 			 struct packet_type *ptype,
 			 struct net_device *orig_dev)
 {
-	struct fcoe_softc *fc;
+	struct fcoe_interface *fcoe;
 
-	fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
-	fcoe_ctlr_recv(&fc->ctlr, skb);
+	fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
+	fcoe_ctlr_recv(&fcoe->ctlr, skb);
 	return 0;
 }
 
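Both receive handlers recover the owning fcoe_interface from the embedded packet_type via container_of, i.e. by subtracting the member's offset from the member pointer. A minimal userspace rendering of that pointer arithmetic (structure shapes simplified for illustration):

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct packet_type { int type; };

	struct fcoe_interface {
		int id;
		struct packet_type fip_packet_type;	/* embedded member */
	};

	int main(void)
	{
		struct fcoe_interface iface = { .id = 42 };
		struct packet_type *ptype = &iface.fip_packet_type;
		struct fcoe_interface *back;

		/* recover the containing structure from the member pointer */
		back = container_of(ptype, struct fcoe_interface, fip_packet_type);
		printf("id=%d\n", back->id);	/* prints 42 */
		return 0;
	}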
@@ -159,7 +345,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
  */
 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
-	skb->dev = fcoe_from_ctlr(fip)->real_dev;
+	skb->dev = fcoe_from_ctlr(fip)->netdev;
 	dev_queue_xmit(skb);
 }
 
@@ -174,13 +360,13 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
  */
 static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
 {
-	struct fcoe_softc *fc;
+	struct fcoe_interface *fcoe;
 
-	fc = fcoe_from_ctlr(fip);
+	fcoe = fcoe_from_ctlr(fip);
 	rtnl_lock();
 	if (!is_zero_ether_addr(old))
-		dev_unicast_delete(fc->real_dev, old);
-	dev_unicast_add(fc->real_dev, new);
+		dev_unicast_delete(fcoe->netdev, old);
+	dev_unicast_add(fcoe->netdev, new);
 	rtnl_unlock();
 }
 
@@ -217,30 +403,6 @@ static int fcoe_lport_config(struct fc_lport *lp)
 }
 
 /**
- * fcoe_netdev_cleanup() - clean up netdev configurations
- * @fc: ptr to the fcoe_softc
- */
-void fcoe_netdev_cleanup(struct fcoe_softc *fc)
-{
-	u8 flogi_maddr[ETH_ALEN];
-
-	/* Don't listen for Ethernet packets anymore */
-	dev_remove_pack(&fc->fcoe_packet_type);
-	dev_remove_pack(&fc->fip_packet_type);
-
-	/* Delete secondary MAC addresses */
-	rtnl_lock();
-	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
-	dev_unicast_delete(fc->real_dev, flogi_maddr);
-	if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
-		dev_unicast_delete(fc->real_dev, fc->ctlr.data_src_addr);
-	if (fc->ctlr.spma)
-		dev_unicast_delete(fc->real_dev, fc->ctlr.ctl_src_addr);
-	dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
-	rtnl_unlock();
-}
-
-/**
  * fcoe_queue_timer() - fcoe queue timer
  * @lp: the fc_lport pointer
  *
@@ -265,116 +427,53 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
 {
 	u32 mfs;
 	u64 wwnn, wwpn;
-	struct fcoe_softc *fc;
-	u8 flogi_maddr[ETH_ALEN];
-	struct netdev_hw_addr *ha;
+	struct fcoe_interface *fcoe;
+	struct fcoe_port *port;
 
 	/* Setup lport private data to point to fcoe softc */
-	fc = lport_priv(lp);
-	fc->ctlr.lp = lp;
-	fc->real_dev = netdev;
-	fc->phys_dev = netdev;
-
-	/* Require support for get_pauseparam ethtool op. */
-	if (netdev->priv_flags & IFF_802_1Q_VLAN)
-		fc->phys_dev = vlan_dev_real_dev(netdev);
-
-	/* Do not support for bonding device */
-	if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
-	    (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
-	    (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
-		return -EOPNOTSUPP;
-	}
+	port = lport_priv(lp);
+	fcoe = port->fcoe;
 
 	/*
 	 * Determine max frame size based on underlying device and optional
 	 * user-configured limit. If the MFS is too low, fcoe_link_ok()
 	 * will return 0, so do this first.
 	 */
-	mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
+	mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
 			sizeof(struct fcoe_crc_eof));
 	if (fc_set_mfs(lp, mfs))
 		return -EINVAL;
 
 	/* offload features support */
-	if (fc->real_dev->features & NETIF_F_SG)
+	if (netdev->features & NETIF_F_SG)
 		lp->sg_supp = 1;
 
-#ifdef NETIF_F_FCOE_CRC
 	if (netdev->features & NETIF_F_FCOE_CRC) {
 		lp->crc_offload = 1;
 		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
 	}
-#endif
-#ifdef NETIF_F_FSO
 	if (netdev->features & NETIF_F_FSO) {
 		lp->seq_offload = 1;
 		lp->lso_max = netdev->gso_max_size;
 		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
 				lp->lso_max);
 	}
-#endif
 	if (netdev->fcoe_ddp_xid) {
 		lp->lro_enabled = 1;
 		lp->lro_xid = netdev->fcoe_ddp_xid;
 		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
 				lp->lro_xid);
 	}
-	skb_queue_head_init(&fc->fcoe_pending_queue);
-	fc->fcoe_pending_queue_active = 0;
-	setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
-
-	/* look for SAN MAC address, if multiple SAN MACs exist, only
-	 * use the first one for SPMA */
-	rcu_read_lock();
-	for_each_dev_addr(netdev, ha) {
-		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
-		    (is_valid_ether_addr(fc->ctlr.ctl_src_addr))) {
-			memcpy(fc->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
-			fc->ctlr.spma = 1;
-			break;
-		}
-	}
-	rcu_read_unlock();
-
-	/* setup Source Mac Address */
-	if (!fc->ctlr.spma)
-		memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
-		       fc->real_dev->addr_len);
+	skb_queue_head_init(&port->fcoe_pending_queue);
+	port->fcoe_pending_queue_active = 0;
+	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp);
 
-	wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
+	wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);
 	fc_set_wwnn(lp, wwnn);
 	/* XXX - 3rd arg needs to be vlan id */
-	wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
+	wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);
 	fc_set_wwpn(lp, wwpn);
 
-	/*
-	 * Add FCoE MAC address as second unicast MAC address
-	 * or enter promiscuous mode if not capable of listening
-	 * for multiple unicast MACs.
-	 */
-	rtnl_lock();
-	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
-	dev_unicast_add(fc->real_dev, flogi_maddr);
-	if (fc->ctlr.spma)
-		dev_unicast_add(fc->real_dev, fc->ctlr.ctl_src_addr);
-	dev_mc_add(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
-	rtnl_unlock();
-
-	/*
-	 * setup the receive function from ethernet driver
-	 * on the ethertype for the given device
-	 */
-	fc->fcoe_packet_type.func = fcoe_rcv;
-	fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
-	fc->fcoe_packet_type.dev = fc->real_dev;
-	dev_add_pack(&fc->fcoe_packet_type);
-
-	fc->fip_packet_type.func = fcoe_fip_recv;
-	fc->fip_packet_type.type = htons(ETH_P_FIP);
-	fc->fip_packet_type.dev = fc->real_dev;
-	dev_add_pack(&fc->fip_packet_type);
-
 	return 0;
 }
 
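The two fcoe_wwn_from_mac() calls above derive the node and port names from the adapter's MAC. A hedged sketch of the derivation as libfc performed it at the time, to the best of this editor's knowledge: the 48-bit MAC fills the low bits, the "scheme" selector lands in the top nibble, and scheme 2 additionally carries a port number in bits 48-59 (treat the exact layout as an assumption):

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t wwn_from_mac(const unsigned char mac[6],
				     unsigned int scheme, unsigned int port)
	{
		uint64_t wwn = 0;
		int i;

		for (i = 0; i < 6; i++)
			wwn = (wwn << 8) | mac[i];	/* MAC in low 48 bits */
		wwn |= (uint64_t)scheme << 60;		/* selector in top nibble */
		if (scheme == 2)
			wwn |= (uint64_t)port << 48;	/* port/vlan in bits 48-59 */
		return wwn;
	}

	int main(void)
	{
		unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

		printf("wwnn: 0x%016llx\n",
		       (unsigned long long)wwn_from_mac(mac, 1, 0));
		printf("wwpn: 0x%016llx\n",
		       (unsigned long long)wwn_from_mac(mac, 2, 0));
		return 0;
	}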
@@ -415,86 +514,140 @@ static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
 	return 0;
 }
 
+/*
+ * fcoe_oem_match() - match for read types IO
+ * @fp: the fc_frame for new IO.
+ *
+ * Returns : true for read types IO, otherwise returns false.
+ */
+bool fcoe_oem_match(struct fc_frame *fp)
+{
+	return fc_fcp_is_read(fr_fsp(fp)) &&
+		(fr_fsp(fp)->data_len > fcoe_ddp_min);
+}
+
 /**
  * fcoe_em_config() - allocates em for this lport
- * @lp: the port that em is to allocated for
+ * @lp: the fcoe that em is to allocated for
  *
  * Returns : 0 on success
  */
 static inline int fcoe_em_config(struct fc_lport *lp)
 {
-	BUG_ON(lp->emp);
+	struct fcoe_port *port = lport_priv(lp);
+	struct fcoe_interface *fcoe = port->fcoe;
+	struct fcoe_interface *oldfcoe = NULL;
+	struct net_device *old_real_dev, *cur_real_dev;
+	u16 min_xid = FCOE_MIN_XID;
+	u16 max_xid = FCOE_MAX_XID;
 
-	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
-				    FCOE_MIN_XID, FCOE_MAX_XID);
-	if (!lp->emp)
+	/*
+	 * Check if need to allocate an em instance for
+	 * offload exchange ids to be shared across all VN_PORTs/lport.
+	 */
+	if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) {
+		lp->lro_xid = 0;
+		goto skip_oem;
+	}
+
+	/*
+	 * Reuse existing offload em instance in case
+	 * it is already allocated on real eth device
+	 */
+	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+		cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
+	else
+		cur_real_dev = fcoe->netdev;
+
+	list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
+		if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+			old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
+		else
+			old_real_dev = oldfcoe->netdev;
+
+		if (cur_real_dev == old_real_dev) {
+			fcoe->oem = oldfcoe->oem;
+			break;
+		}
+	}
+
+	if (fcoe->oem) {
+		if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) {
+			printk(KERN_ERR "fcoe_em_config: failed to add "
+			       "offload em:%p on interface:%s\n",
+			       fcoe->oem, fcoe->netdev->name);
+			return -ENOMEM;
+		}
+	} else {
+		fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3,
+					      FCOE_MIN_XID, lp->lro_xid,
+					      fcoe_oem_match);
+		if (!fcoe->oem) {
+			printk(KERN_ERR "fcoe_em_config: failed to allocate "
+			       "em for offload exches on interface:%s\n",
+			       fcoe->netdev->name);
+			return -ENOMEM;
+		}
+	}
+
+	/*
+	 * Exclude offload EM xid range from next EM xid range.
+	 */
+	min_xid += lp->lro_xid + 1;
+
+skip_oem:
+	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) {
+		printk(KERN_ERR "fcoe_em_config: failed to "
+		       "allocate em on interface %s\n", fcoe->netdev->name);
 		return -ENOMEM;
+	}
 
 	return 0;
 }
 
 /**
  * fcoe_if_destroy() - FCoE software HBA tear-down function
- * @netdev: ptr to the associated net_device
- *
- * Returns: 0 if link is OK for use by FCoE.
+ * @lport: fc_lport to destroy
  */
-static int fcoe_if_destroy(struct net_device *netdev)
+static void fcoe_if_destroy(struct fc_lport *lport)
 {
-	struct fc_lport *lp = NULL;
-	struct fcoe_softc *fc;
-
-	BUG_ON(!netdev);
+	struct fcoe_port *port = lport_priv(lport);
+	struct fcoe_interface *fcoe = port->fcoe;
+	struct net_device *netdev = fcoe->netdev;
 
 	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
 
-	lp = fcoe_hostlist_lookup(netdev);
-	if (!lp)
-		return -ENODEV;
-
-	fc = lport_priv(lp);
-
 	/* Logout of the fabric */
-	fc_fabric_logoff(lp);
+	fc_fabric_logoff(lport);
 
-	/* Remove the instance from fcoe's list */
-	fcoe_hostlist_remove(lp);
+	/* Cleanup the fc_lport */
+	fc_lport_destroy(lport);
+	fc_fcp_destroy(lport);
 
-	/* clean up netdev configurations */
-	fcoe_netdev_cleanup(fc);
+	/* Stop the transmit retry timer */
+	del_timer_sync(&port->timer);
 
-	/* tear-down the FCoE controller */
-	fcoe_ctlr_destroy(&fc->ctlr);
+	/* Free existing transmit skbs */
+	fcoe_clean_pending_queue(lport);
 
-	/* Cleanup the fc_lport */
-	fc_lport_destroy(lp);
-	fc_fcp_destroy(lp);
+	/* receives may not be stopped until after this */
+	fcoe_interface_put(fcoe);
+
+	/* Free queued packets for the per-CPU receive threads */
+	fcoe_percpu_clean(lport);
 
 	/* Detach from the scsi-ml */
-	fc_remove_host(lp->host);
-	scsi_remove_host(lp->host);
+	fc_remove_host(lport->host);
+	scsi_remove_host(lport->host);
 
 	/* There are no more rports or I/O, free the EM */
-	if (lp->emp)
-		fc_exch_mgr_free(lp->emp);
-
-	/* Free the per-CPU receive threads */
-	fcoe_percpu_clean(lp);
-
-	/* Free existing skbs */
-	fcoe_clean_pending_queue(lp);
-
-	/* Stop the timer */
-	del_timer_sync(&fc->timer);
+	fc_exch_mgr_free(lport);
 
 	/* Free memory used by statistical counters */
-	fc_lport_free_stats(lp);
-
-	/* Release the net_device and Scsi_Host */
-	dev_put(fc->real_dev);
-	scsi_host_put(lp->host);
+	fc_lport_free_stats(lport);
 
-	return 0;
+	/* Release the Scsi_Host */
+	scsi_host_put(lport->host);
 }
 
 /*
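The exchange-id space is now partitioned: when DDP offload is usable, the offload EM claims FCOE_MIN_XID through lp->lro_xid and the general-purpose EM starts just above it. The arithmetic, as a standalone sketch (the constants are illustrative, not the driver's definitions):

	#include <stdio.h>

	#define FCOE_MIN_XID	0x0000	/* illustrative range bounds */
	#define FCOE_MAX_XID	0x0fff

	int main(void)
	{
		unsigned int lro_xid = 0x01ff;	/* per-netdev DDP limit (example) */
		unsigned int min_xid = FCOE_MIN_XID;

		printf("offload EM: xids 0x%04x-0x%04x\n", FCOE_MIN_XID, lro_xid);
		min_xid += lro_xid + 1;		/* exclude the offload range */
		printf("general EM: xids 0x%04x-0x%04x\n", min_xid, FCOE_MAX_XID);
		return 0;
	}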
@@ -540,106 +693,96 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
 };
 
 /**
- * fcoe_if_create() - this function creates the fcoe interface
- * @netdev: pointer the associated netdevice
+ * fcoe_if_create() - this function creates the fcoe port
+ * @fcoe: fcoe_interface structure to create an fc_lport instance on
+ * @parent: device pointer to be the parent in sysfs for the SCSI host
  *
- * Creates fc_lport struct and scsi_host for lport, configures lport
- * and starts fabric login.
+ * Creates fc_lport struct and scsi_host for lport, configures lport.
  *
- * Returns : 0 on success
+ * Returns : The allocated fc_lport or an error pointer
  */
-static int fcoe_if_create(struct net_device *netdev)
+static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
+				       struct device *parent)
 {
 	int rc;
-	struct fc_lport *lp = NULL;
-	struct fcoe_softc *fc;
+	struct fc_lport *lport = NULL;
+	struct fcoe_port *port;
 	struct Scsi_Host *shost;
-
-	BUG_ON(!netdev);
+	struct net_device *netdev = fcoe->netdev;
 
 	FCOE_NETDEV_DBG(netdev, "Create Interface\n");
 
-	lp = fcoe_hostlist_lookup(netdev);
-	if (lp)
-		return -EEXIST;
-
 	shost = libfc_host_alloc(&fcoe_shost_template,
-				 sizeof(struct fcoe_softc));
+				 sizeof(struct fcoe_port));
 	if (!shost) {
 		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto out;
 	}
-	lp = shost_priv(shost);
-	fc = lport_priv(lp);
+	lport = shost_priv(shost);
+	port = lport_priv(lport);
+	port->lport = lport;
+	port->fcoe = fcoe;
+	INIT_WORK(&port->destroy_work, fcoe_destroy_work);
 
 	/* configure fc_lport, e.g., em */
-	rc = fcoe_lport_config(lp);
+	rc = fcoe_lport_config(lport);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
 				"interface\n");
 		goto out_host_put;
 	}
 
-	/*
-	 * Initialize FIP.
-	 */
-	fcoe_ctlr_init(&fc->ctlr);
-	fc->ctlr.send = fcoe_fip_send;
-	fc->ctlr.update_mac = fcoe_update_src_mac;
-
 	/* configure lport network properties */
-	rc = fcoe_netdev_config(lp, netdev);
+	rc = fcoe_netdev_config(lport, netdev);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
 				"interface\n");
-		goto out_netdev_cleanup;
+		goto out_lp_destroy;
 	}
 
 	/* configure lport scsi host properties */
-	rc = fcoe_shost_config(lp, shost, &netdev->dev);
+	rc = fcoe_shost_config(lport, shost, parent);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
 				"interface\n");
-		goto out_netdev_cleanup;
-	}
-
-	/* lport exch manager allocation */
-	rc = fcoe_em_config(lp);
-	if (rc) {
-		FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
-				"interface\n");
-		goto out_netdev_cleanup;
+		goto out_lp_destroy;
 	}
 
 	/* Initialize the library */
-	rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
+	rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
 				"interface\n");
 		goto out_lp_destroy;
 	}
 
-	/* add to lports list */
-	fcoe_hostlist_add(lp);
-
-	lp->boot_time = jiffies;
-
-	fc_fabric_login(lp);
-
-	if (!fcoe_link_ok(lp))
-		fcoe_ctlr_link_up(&fc->ctlr);
+	/*
+	 * fcoe_em_alloc() and fcoe_hostlist_add() both
+	 * need to be atomic with respect to other changes to the hostlist
+	 * since fcoe_em_alloc() looks for an existing EM
+	 * instance on host list updated by fcoe_hostlist_add().
+	 *
+	 * This is currently handled through the fcoe_config_mutex begin held.
+	 */
 
-	dev_hold(netdev);
+	/* lport exch manager allocation */
+	rc = fcoe_em_config(lport);
+	if (rc) {
+		FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
+				"interface\n");
+		goto out_lp_destroy;
+	}
 
-	return rc;
+	fcoe_interface_get(fcoe);
+	return lport;
 
 out_lp_destroy:
-	fc_exch_mgr_free(lp->emp); /* Free the EM */
-out_netdev_cleanup:
-	fcoe_netdev_cleanup(fc);
+	fc_exch_mgr_free(lport);
 out_host_put:
-	scsi_host_put(lp->host);
-	return rc;
+	scsi_host_put(lport->host);
+out:
+	return ERR_PTR(rc);
 }
 
 /**
@@ -669,6 +812,7 @@ static int __init fcoe_if_init(void)
 int __exit fcoe_if_exit(void)
 {
 	fc_release_transport(scsi_transport_fcoe_sw);
+	scsi_transport_fcoe_sw = NULL;
 	return 0;
 }
 
@@ -686,7 +830,7 @@ static void fcoe_percpu_thread_create(unsigned int cpu)
 	thread = kthread_create(fcoe_percpu_receive_thread,
 				(void *)p, "fcoethread/%d", cpu);
 
-	if (likely(!IS_ERR(p->thread))) {
+	if (likely(!IS_ERR(thread))) {
 		kthread_bind(thread, cpu);
 		wake_up_process(thread);
 
@@ -838,14 +982,13 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct fc_lport *lp;
 	struct fcoe_rcv_info *fr;
-	struct fcoe_softc *fc;
+	struct fcoe_interface *fcoe;
 	struct fc_frame_header *fh;
 	struct fcoe_percpu_s *fps;
-	unsigned short oxid;
-	unsigned int cpu = 0;
+	unsigned int cpu;
 
-	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
-	lp = fc->ctlr.lp;
+	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
+	lp = fcoe->ctlr.lp;
 	if (unlikely(lp == NULL)) {
 		FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
 		goto err2;
@@ -876,20 +1019,20 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
 	fh = (struct fc_frame_header *) skb_transport_header(skb);
 
-	oxid = ntohs(fh->fh_ox_id);
-
 	fr = fcoe_dev_from_skb(skb);
 	fr->fr_dev = lp;
 	fr->ptype = ptype;
 
-#ifdef CONFIG_SMP
 	/*
-	 * The incoming frame exchange id(oxid) is ANDed with num of online
-	 * cpu bits to get cpu and then this cpu is used for selecting
-	 * a per cpu kernel thread from fcoe_percpu.
+	 * In case the incoming frame's exchange is originated from
+	 * the initiator, then received frame's exchange id is ANDed
+	 * with fc_cpu_mask bits to get the same cpu on which exchange
+	 * was originated, otherwise just use the current cpu.
 	 */
-	cpu = oxid & (num_online_cpus() - 1);
-#endif
+	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
+		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
+	else
+		cpu = smp_processor_id();
 
 	fps = &per_cpu(fcoe_percpu, cpu);
 	spin_lock_bh(&fps->fcoe_rx_list.lock);
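Frames replying to an exchange this host originated are steered back to the CPU that allocated the exchange by masking the OX_ID, so one exchange is always processed by one RX thread. A hedged userspace rendering of the selection (the F_CTL bit position and mask width are assumptions for illustration):

	#include <stdio.h>
	#include <stdint.h>

	#define FC_FC_EX_CTX	(1 << 23)	/* exchange-responder bit (assumed) */

	int main(void)
	{
		uint32_t f_ctl = FC_FC_EX_CTX;	/* frame from the exchange responder */
		uint16_t ox_id = 0x1234;	/* originator exchange id */
		unsigned int fc_cpu_mask = 0x3;	/* 4 RX threads, power-of-two mask */
		unsigned int cpu;

		if (f_ctl & FC_FC_EX_CTX)
			cpu = ox_id & fc_cpu_mask;	/* back to originating CPU */
		else
			cpu = 0;	/* stand-in for smp_processor_id() */

		printf("frame queued to rx thread %u\n", cpu);
		return 0;
	}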
@@ -996,7 +1139,7 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
  * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
  * @fp: the fc_frame containing data to be checksummed
  *
- * This uses crc32() to calculate the crc for fc frame
+ * This uses crc32() to calculate the crc for port frame
  * Return : 32 bit crc
  */
 u32 fcoe_fc_crc(struct fc_frame *fp)
@@ -1029,7 +1172,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
 
 /**
  * fcoe_xmit() - FCoE frame transmit function
- * @lp:	the associated local port
+ * @lp:	the associated local fcoe
  * @fp: the fc_frame to be transmitted
  *
  * Return : 0 for success
@@ -1046,13 +1189,13 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	unsigned int hlen;		/* header length implies the version */
 	unsigned int tlen;		/* trailer length */
 	unsigned int elen;		/* eth header, may include vlan */
-	struct fcoe_softc *fc;
+	struct fcoe_port *port = lport_priv(lp);
+	struct fcoe_interface *fcoe = port->fcoe;
 	u8 sof, eof;
 	struct fcoe_hdr *hp;
 
 	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
 
-	fc = lport_priv(lp);
 	fh = fc_frame_header_get(fp);
 	skb = fp_skb(fp);
 	wlen = skb->len / FCOE_WORD_TO_BYTE;
@@ -1063,7 +1206,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1063 } 1206 }
1064 1207
1065 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && 1208 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1066 fcoe_ctlr_els_send(&fc->ctlr, skb)) 1209 fcoe_ctlr_els_send(&fcoe->ctlr, skb))
1067 return 0; 1210 return 0;
1068 1211
1069 sof = fr_sof(fp); 1212 sof = fr_sof(fp);
@@ -1085,7 +1228,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1085 crc = fcoe_fc_crc(fp); 1228 crc = fcoe_fc_crc(fp);
1086 } 1229 }
1087 1230
1088 /* copy fc crc and eof to the skb buff */ 1231 /* copy fc crc and eof to the skb buff */
1089 if (skb_is_nonlinear(skb)) { 1232 if (skb_is_nonlinear(skb)) {
1090 skb_frag_t *frag; 1233 skb_frag_t *frag;
1091 if (fcoe_get_paged_crc_eof(skb, tlen)) { 1234 if (fcoe_get_paged_crc_eof(skb, tlen)) {
@@ -1108,27 +1251,27 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1108 cp = NULL; 1251 cp = NULL;
1109 } 1252 }
1110 1253
1111 /* adjust skb network/transport offsets to match mac/fcoe/fc */ 1254 /* adjust skb network/transport offsets to match mac/fcoe/fc */
1112 skb_push(skb, elen + hlen); 1255 skb_push(skb, elen + hlen);
1113 skb_reset_mac_header(skb); 1256 skb_reset_mac_header(skb);
1114 skb_reset_network_header(skb); 1257 skb_reset_network_header(skb);
1115 skb->mac_len = elen; 1258 skb->mac_len = elen;
1116 skb->protocol = htons(ETH_P_FCOE); 1259 skb->protocol = htons(ETH_P_FCOE);
1117 skb->dev = fc->real_dev; 1260 skb->dev = fcoe->netdev;
1118 1261
1119 /* fill up mac and fcoe headers */ 1262 /* fill up mac and fcoe headers */
1120 eh = eth_hdr(skb); 1263 eh = eth_hdr(skb);
1121 eh->h_proto = htons(ETH_P_FCOE); 1264 eh->h_proto = htons(ETH_P_FCOE);
1122 if (fc->ctlr.map_dest) 1265 if (fcoe->ctlr.map_dest)
1123 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); 1266 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1124 else 1267 else
1125 /* insert GW address */ 1268 /* insert GW address */
1126 memcpy(eh->h_dest, fc->ctlr.dest_addr, ETH_ALEN); 1269 memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
1127 1270
1128 if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 1271 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1129 memcpy(eh->h_source, fc->ctlr.ctl_src_addr, ETH_ALEN); 1272 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
1130 else 1273 else
1131 memcpy(eh->h_source, fc->ctlr.data_src_addr, ETH_ALEN); 1274 memcpy(eh->h_source, fcoe->ctlr.data_src_addr, ETH_ALEN);
1132 1275
1133 hp = (struct fcoe_hdr *)(eh + 1); 1276 hp = (struct fcoe_hdr *)(eh + 1);
1134 memset(hp, 0, sizeof(*hp)); 1277 memset(hp, 0, sizeof(*hp));
@@ -1136,7 +1279,6 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1136 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); 1279 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1137 hp->fcoe_sof = sof; 1280 hp->fcoe_sof = sof;
1138 1281
1139#ifdef NETIF_F_FSO
1140 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ 1282 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1141 if (lp->seq_offload && fr_max_payload(fp)) { 1283 if (lp->seq_offload && fr_max_payload(fp)) {
1142 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; 1284 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
@@ -1145,7 +1287,6 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1145 skb_shinfo(skb)->gso_type = 0; 1287 skb_shinfo(skb)->gso_type = 0;
1146 skb_shinfo(skb)->gso_size = 0; 1288 skb_shinfo(skb)->gso_size = 0;
1147 } 1289 }
1148#endif
1149 /* update tx stats: regardless if LLD fails */ 1290 /* update tx stats: regardless if LLD fails */
1150 stats = fc_lport_get_stats(lp); 1291 stats = fc_lport_get_stats(lp);
1151 stats->TxFrames++; 1292 stats->TxFrames++;
@@ -1153,7 +1294,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1153 1294
1154 /* send down to lld */ 1295 /* send down to lld */
1155 fr_dev(fp) = lp; 1296 fr_dev(fp) = lp;
1156 if (fc->fcoe_pending_queue.qlen) 1297 if (port->fcoe_pending_queue.qlen)
1157 fcoe_check_wait_queue(lp, skb); 1298 fcoe_check_wait_queue(lp, skb);
1158 else if (fcoe_start_io(skb)) 1299 else if (fcoe_start_io(skb))
1159 fcoe_check_wait_queue(lp, skb); 1300 fcoe_check_wait_queue(lp, skb);
@@ -1162,6 +1303,15 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1162} 1303}
1163 1304
1164/** 1305/**
1306 * fcoe_percpu_flush_done() - Indicate percpu queue flush completion.
1307 * @skb: the skb being completed.
1308 */
1309static void fcoe_percpu_flush_done(struct sk_buff *skb)
1310{
1311 complete(&fcoe_flush_completion);
1312}
1313
1314/**
1165 * fcoe_percpu_receive_thread() - recv thread per cpu 1315 * fcoe_percpu_receive_thread() - recv thread per cpu
1166 * @arg: ptr to the fcoe per cpu struct 1316 * @arg: ptr to the fcoe per cpu struct
1167 * 1317 *
@@ -1179,7 +1329,7 @@ int fcoe_percpu_receive_thread(void *arg)
1179 struct fcoe_crc_eof crc_eof; 1329 struct fcoe_crc_eof crc_eof;
1180 struct fc_frame *fp; 1330 struct fc_frame *fp;
1181 u8 *mac = NULL; 1331 u8 *mac = NULL;
1182 struct fcoe_softc *fc; 1332 struct fcoe_port *port;
1183 struct fcoe_hdr *hp; 1333 struct fcoe_hdr *hp;
1184 1334
1185 set_user_nice(current, -20); 1335 set_user_nice(current, -20);
@@ -1200,7 +1350,8 @@ int fcoe_percpu_receive_thread(void *arg)
1200 fr = fcoe_dev_from_skb(skb); 1350 fr = fcoe_dev_from_skb(skb);
1201 lp = fr->fr_dev; 1351 lp = fr->fr_dev;
1202 if (unlikely(lp == NULL)) { 1352 if (unlikely(lp == NULL)) {
1203 FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure"); 1353 if (skb->destructor != fcoe_percpu_flush_done)
1354 FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1204 kfree_skb(skb); 1355 kfree_skb(skb);
1205 continue; 1356 continue;
1206 } 1357 }
@@ -1215,7 +1366,7 @@ int fcoe_percpu_receive_thread(void *arg)
1215 /* 1366 /*
1216 * Save source MAC address before discarding header. 1367 * Save source MAC address before discarding header.
1217 */ 1368 */
1218 fc = lport_priv(lp); 1369 port = lport_priv(lp);
1219 if (skb_is_nonlinear(skb)) 1370 if (skb_is_nonlinear(skb))
1220 skb_linearize(skb); /* not ideal */ 1371 skb_linearize(skb); /* not ideal */
1221 mac = eth_hdr(skb)->h_source; 1372 mac = eth_hdr(skb)->h_source;
@@ -1277,7 +1428,7 @@ int fcoe_percpu_receive_thread(void *arg)
1277 fh = fc_frame_header_get(fp); 1428 fh = fc_frame_header_get(fp);
1278 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && 1429 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1279 fh->fh_type == FC_TYPE_FCP) { 1430 fh->fh_type == FC_TYPE_FCP) {
1280 fc_exch_recv(lp, lp->emp, fp); 1431 fc_exch_recv(lp, fp);
1281 continue; 1432 continue;
1282 } 1433 }
1283 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { 1434 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
@@ -1293,12 +1444,12 @@ int fcoe_percpu_receive_thread(void *arg)
1293 } 1444 }
1294 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; 1445 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1295 } 1446 }
1296 if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN) && 1447 if (unlikely(port->fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
1297 fcoe_ctlr_recv_flogi(&fc->ctlr, fp, mac)) { 1448 fcoe_ctlr_recv_flogi(&port->fcoe->ctlr, fp, mac)) {
1298 fc_frame_free(fp); 1449 fc_frame_free(fp);
1299 continue; 1450 continue;
1300 } 1451 }
1301 fc_exch_recv(lp, lp->emp, fp); 1452 fc_exch_recv(lp, fp);
1302 } 1453 }
1303 return 0; 1454 return 0;
1304} 1455}
@@ -1318,46 +1469,46 @@ int fcoe_percpu_receive_thread(void *arg)
1318 */ 1469 */
1319static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) 1470static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1320{ 1471{
1321 struct fcoe_softc *fc = lport_priv(lp); 1472 struct fcoe_port *port = lport_priv(lp);
1322 int rc; 1473 int rc;
1323 1474
1324 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1475 spin_lock_bh(&port->fcoe_pending_queue.lock);
1325 1476
1326 if (skb) 1477 if (skb)
1327 __skb_queue_tail(&fc->fcoe_pending_queue, skb); 1478 __skb_queue_tail(&port->fcoe_pending_queue, skb);
1328 1479
1329 if (fc->fcoe_pending_queue_active) 1480 if (port->fcoe_pending_queue_active)
1330 goto out; 1481 goto out;
1331 fc->fcoe_pending_queue_active = 1; 1482 port->fcoe_pending_queue_active = 1;
1332 1483
1333 while (fc->fcoe_pending_queue.qlen) { 1484 while (port->fcoe_pending_queue.qlen) {
1334 /* keep qlen > 0 until fcoe_start_io succeeds */ 1485 /* keep qlen > 0 until fcoe_start_io succeeds */
1335 fc->fcoe_pending_queue.qlen++; 1486 port->fcoe_pending_queue.qlen++;
1336 skb = __skb_dequeue(&fc->fcoe_pending_queue); 1487 skb = __skb_dequeue(&port->fcoe_pending_queue);
1337 1488
1338 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1489 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1339 rc = fcoe_start_io(skb); 1490 rc = fcoe_start_io(skb);
1340 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1491 spin_lock_bh(&port->fcoe_pending_queue.lock);
1341 1492
1342 if (rc) { 1493 if (rc) {
1343 __skb_queue_head(&fc->fcoe_pending_queue, skb); 1494 __skb_queue_head(&port->fcoe_pending_queue, skb);
1344 /* undo temporary increment above */ 1495 /* undo temporary increment above */
1345 fc->fcoe_pending_queue.qlen--; 1496 port->fcoe_pending_queue.qlen--;
1346 break; 1497 break;
1347 } 1498 }
1348 /* undo temporary increment above */ 1499 /* undo temporary increment above */
1349 fc->fcoe_pending_queue.qlen--; 1500 port->fcoe_pending_queue.qlen--;
1350 } 1501 }
1351 1502
1352 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) 1503 if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1353 lp->qfull = 0; 1504 lp->qfull = 0;
1354 if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer)) 1505 if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
1355 mod_timer(&fc->timer, jiffies + 2); 1506 mod_timer(&port->timer, jiffies + 2);
1356 fc->fcoe_pending_queue_active = 0; 1507 port->fcoe_pending_queue_active = 0;
1357out: 1508out:
1358 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) 1509 if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1359 lp->qfull = 1; 1510 lp->qfull = 1;
1360 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1511 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1361 return; 1512 return;
1362} 1513}
1363 1514
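The renamed fields aside, the drain loop above keeps a subtle invariant: qlen is bumped before the lock is dropped around fcoe_start_io(), so a concurrent fcoe_xmit() still sees a non-empty queue and enqueues behind the drain instead of racing it. A hedged userspace sketch of just that invariant, with a pthread mutex in place of the skb queue lock and a stub for fcoe_start_io():

/* Sketch only: the queue is reduced to a counter, the stub always
 * "succeeds"; the ++/-- bookkeeping mirrors the patch. */
#include <pthread.h>
#include <stdio.h>

struct pending_queue {
	pthread_mutex_t lock;
	int qlen;                     /* stands in for sk_buff_head.qlen */
};

static int start_io_stub(void) { return 0; }   /* 0 = handed to the NIC */

static void drain(struct pending_queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->qlen) {
		q->qlen++;            /* keep qlen > 0 while unlocked... */
		q->qlen--;            /* ...even after this models __skb_dequeue() */
		pthread_mutex_unlock(&q->lock);
		int rc = start_io_stub();      /* may block; lock not held */
		pthread_mutex_lock(&q->lock);
		if (rc)
			q->qlen++;    /* models __skb_queue_head() requeue */
		q->qlen--;            /* undo the temporary increment */
		if (rc)
			break;
	}
	pthread_mutex_unlock(&q->lock);
	printf("drained, qlen = %d\n", q->qlen);
}

int main(void)
{
	struct pending_queue q = { PTHREAD_MUTEX_INITIALIZER, 3 };
	drain(&q);
	return 0;
}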
@@ -1391,21 +1542,20 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1391 ulong event, void *ptr) 1542 ulong event, void *ptr)
1392{ 1543{
1393 struct fc_lport *lp = NULL; 1544 struct fc_lport *lp = NULL;
1394 struct net_device *real_dev = ptr; 1545 struct net_device *netdev = ptr;
1395 struct fcoe_softc *fc; 1546 struct fcoe_interface *fcoe;
1547 struct fcoe_port *port;
1396 struct fcoe_dev_stats *stats; 1548 struct fcoe_dev_stats *stats;
1397 u32 link_possible = 1; 1549 u32 link_possible = 1;
1398 u32 mfs; 1550 u32 mfs;
1399 int rc = NOTIFY_OK; 1551 int rc = NOTIFY_OK;
1400 1552
1401 read_lock(&fcoe_hostlist_lock); 1553 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1402 list_for_each_entry(fc, &fcoe_hostlist, list) { 1554 if (fcoe->netdev == netdev) {
1403 if (fc->real_dev == real_dev) { 1555 lp = fcoe->ctlr.lp;
1404 lp = fc->ctlr.lp;
1405 break; 1556 break;
1406 } 1557 }
1407 } 1558 }
1408 read_unlock(&fcoe_hostlist_lock);
1409 if (lp == NULL) { 1559 if (lp == NULL) {
1410 rc = NOTIFY_DONE; 1560 rc = NOTIFY_DONE;
1411 goto out; 1561 goto out;
@@ -1420,21 +1570,27 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1420 case NETDEV_CHANGE: 1570 case NETDEV_CHANGE:
1421 break; 1571 break;
1422 case NETDEV_CHANGEMTU: 1572 case NETDEV_CHANGEMTU:
1423 mfs = fc->real_dev->mtu - 1573 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
1424 (sizeof(struct fcoe_hdr) + 1574 sizeof(struct fcoe_crc_eof));
1425 sizeof(struct fcoe_crc_eof));
1426 if (mfs >= FC_MIN_MAX_FRAME) 1575 if (mfs >= FC_MIN_MAX_FRAME)
1427 fc_set_mfs(lp, mfs); 1576 fc_set_mfs(lp, mfs);
1428 break; 1577 break;
1429 case NETDEV_REGISTER: 1578 case NETDEV_REGISTER:
1430 break; 1579 break;
1580 case NETDEV_UNREGISTER:
1581 list_del(&fcoe->list);
1582 port = lport_priv(fcoe->ctlr.lp);
1583 fcoe_interface_cleanup(fcoe);
1584 schedule_work(&port->destroy_work);
1585 goto out;
1586 break;
1431 default: 1587 default:
1432 FCOE_NETDEV_DBG(real_dev, "Unknown event %ld " 1588 FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1433 "from netdev netlink\n", event); 1589 "from netdev netlink\n", event);
1434 } 1590 }
1435 if (link_possible && !fcoe_link_ok(lp)) 1591 if (link_possible && !fcoe_link_ok(lp))
1436 fcoe_ctlr_link_up(&fc->ctlr); 1592 fcoe_ctlr_link_up(&fcoe->ctlr);
1437 else if (fcoe_ctlr_link_down(&fc->ctlr)) { 1593 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
1438 stats = fc_lport_get_stats(lp); 1594 stats = fc_lport_get_stats(lp);
1439 stats->LinkFailureCount++; 1595 stats->LinkFailureCount++;
1440 fcoe_clean_pending_queue(lp); 1596 fcoe_clean_pending_queue(lp);
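Two behavioral changes sit in this notifier hunk: NETDEV_UNREGISTER now tears the interface down asynchronously via destroy_work (the notifier runs under rtnl, where fcoe_if_destroy() cannot), and NETDEV_CHANGEMTU recomputes the FC maximum frame size straight from the netdev MTU. A sketch of the MTU arithmetic, with header sizes taken from the on-wire fcoe_hdr (14 bytes) and fcoe_crc_eof (8 bytes) layouts as I understand them:

/* Illustrative only; the #define'd sizes are assumptions standing in
 * for sizeof() on the real structs. */
#include <stdio.h>

#define FCOE_HDR_LEN     14   /* sizeof(struct fcoe_hdr), assumed */
#define FCOE_CRC_EOF_LEN  8   /* sizeof(struct fcoe_crc_eof), assumed */
#define FC_MIN_MAX_FRAME 256  /* libfc floor for a usable mfs */

int main(void)
{
	unsigned int mtu = 2500;  /* typical "baby jumbo" FCoE MTU */
	unsigned int mfs = mtu - (FCOE_HDR_LEN + FCOE_CRC_EOF_LEN);

	if (mfs >= FC_MIN_MAX_FRAME)
		printf("fc_set_mfs(lp, %u)\n", mfs);
	else
		printf("mtu %u too small for FCoE\n", mtu);
	return 0;
}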
@@ -1465,75 +1621,6 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
1465} 1621}
1466 1622
1467/** 1623/**
1468 * fcoe_netdev_to_module_owner() - finds out the driver module of the netdev
1469 * @netdev: the target netdev
1470 *
1471 * Returns: ptr to the struct module, NULL for failure
1472 */
1473static struct module *
1474fcoe_netdev_to_module_owner(const struct net_device *netdev)
1475{
1476 struct device *dev;
1477
1478 if (!netdev)
1479 return NULL;
1480
1481 dev = netdev->dev.parent;
1482 if (!dev)
1483 return NULL;
1484
1485 if (!dev->driver)
1486 return NULL;
1487
1488 return dev->driver->owner;
1489}
1490
1491/**
1492 * fcoe_ethdrv_get() - Hold the Ethernet driver
1493 * @netdev: the target netdev
1494 *
1495 * Holds the Ethernet driver module by try_module_get() for
1496 * the corresponding netdev.
1497 *
1498 * Returns: 0 for success
1499 */
1500static int fcoe_ethdrv_get(const struct net_device *netdev)
1501{
1502 struct module *owner;
1503
1504 owner = fcoe_netdev_to_module_owner(netdev);
1505 if (owner) {
1506 FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n",
1507 module_name(owner));
1508 return try_module_get(owner);
1509 }
1510 return -ENODEV;
1511}
1512
1513/**
1514 * fcoe_ethdrv_put() - Release the Ethernet driver
1515 * @netdev: the target netdev
1516 *
1517 * Releases the Ethernet driver module by module_put for
1518 * the corresponding netdev.
1519 *
1520 * Returns: 0 for success
1521 */
1522static int fcoe_ethdrv_put(const struct net_device *netdev)
1523{
1524 struct module *owner;
1525
1526 owner = fcoe_netdev_to_module_owner(netdev);
1527 if (owner) {
1528 FCOE_NETDEV_DBG(netdev, "Release driver module %s\n",
1529 module_name(owner));
1530 module_put(owner);
1531 return 0;
1532 }
1533 return -ENODEV;
1534}
1535
1536/**
1537 * fcoe_destroy() - handles the destroy from sysfs 1624 * fcoe_destroy() - handles the destroy from sysfs
1538 * @buffer: expected to be an eth if name 1625 * @buffer: expected to be an eth if name
1539 * @kp: associated kernel param 1626 * @kp: associated kernel param
@@ -1542,34 +1629,57 @@ static int fcoe_ethdrv_put(const struct net_device *netdev)
1542 */ 1629 */
1543static int fcoe_destroy(const char *buffer, struct kernel_param *kp) 1630static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1544{ 1631{
1545 int rc; 1632 struct fcoe_interface *fcoe;
1546 struct net_device *netdev; 1633 struct net_device *netdev;
1634 int rc;
1635
1636 mutex_lock(&fcoe_config_mutex);
1637#ifdef CONFIG_FCOE_MODULE
1638 /*
1639 * Make sure the module has been initialized, and is not about to be
1640 * removed. Module parameter sysfs files are writable before the
1641 * module_init function is called and after module_exit.
1642 */
1643 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1644 rc = -ENODEV;
1645 goto out_nodev;
1646 }
1647#endif
1547 1648
1548 netdev = fcoe_if_to_netdev(buffer); 1649 netdev = fcoe_if_to_netdev(buffer);
1549 if (!netdev) { 1650 if (!netdev) {
1550 rc = -ENODEV; 1651 rc = -ENODEV;
1551 goto out_nodev; 1652 goto out_nodev;
1552 } 1653 }
1553 /* look for existing lport */ 1654
1554 if (!fcoe_hostlist_lookup(netdev)) { 1655 rtnl_lock();
1656 fcoe = fcoe_hostlist_lookup_port(netdev);
1657 if (!fcoe) {
1658 rtnl_unlock();
1555 rc = -ENODEV; 1659 rc = -ENODEV;
1556 goto out_putdev; 1660 goto out_putdev;
1557 } 1661 }
1558 rc = fcoe_if_destroy(netdev); 1662 list_del(&fcoe->list);
1559 if (rc) { 1663 fcoe_interface_cleanup(fcoe);
1560 printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n", 1664 rtnl_unlock();
1561 netdev->name); 1665 fcoe_if_destroy(fcoe->ctlr.lp);
1562 rc = -EIO;
1563 goto out_putdev;
1564 }
1565 fcoe_ethdrv_put(netdev);
1566 rc = 0;
1567out_putdev: 1666out_putdev:
1568 dev_put(netdev); 1667 dev_put(netdev);
1569out_nodev: 1668out_nodev:
1669 mutex_unlock(&fcoe_config_mutex);
1570 return rc; 1670 return rc;
1571} 1671}
1572 1672
1673static void fcoe_destroy_work(struct work_struct *work)
1674{
1675 struct fcoe_port *port;
1676
1677 port = container_of(work, struct fcoe_port, destroy_work);
1678 mutex_lock(&fcoe_config_mutex);
1679 fcoe_if_destroy(port->lport);
1680 mutex_unlock(&fcoe_config_mutex);
1681}
1682
1573/** 1683/**
1574 * fcoe_create() - Handles the create call from sysfs 1684 * fcoe_create() - Handles the create call from sysfs
1575 * @buffer: expected to be an eth if name 1685 * @buffer: expected to be an eth if name
@@ -1580,41 +1690,84 @@ out_nodev:
1580static int fcoe_create(const char *buffer, struct kernel_param *kp) 1690static int fcoe_create(const char *buffer, struct kernel_param *kp)
1581{ 1691{
1582 int rc; 1692 int rc;
1693 struct fcoe_interface *fcoe;
1694 struct fc_lport *lport;
1583 struct net_device *netdev; 1695 struct net_device *netdev;
1584 1696
1697 mutex_lock(&fcoe_config_mutex);
1698#ifdef CONFIG_FCOE_MODULE
1699 /*
1700 * Make sure the module has been initialized, and is not about to be
1701 * removed. Module parameter sysfs files are writable before the
1702 * module_init function is called and after module_exit.
1703 */
1704 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1705 rc = -ENODEV;
1706 goto out_nodev;
1707 }
1708#endif
1709
1710 rtnl_lock();
1585 netdev = fcoe_if_to_netdev(buffer); 1711 netdev = fcoe_if_to_netdev(buffer);
1586 if (!netdev) { 1712 if (!netdev) {
1587 rc = -ENODEV; 1713 rc = -ENODEV;
1588 goto out_nodev; 1714 goto out_nodev;
1589 } 1715 }
1716
1590 /* look for existing lport */ 1717 /* look for existing lport */
1591 if (fcoe_hostlist_lookup(netdev)) { 1718 if (fcoe_hostlist_lookup(netdev)) {
1592 rc = -EEXIST; 1719 rc = -EEXIST;
1593 goto out_putdev; 1720 goto out_putdev;
1594 } 1721 }
1595 fcoe_ethdrv_get(netdev);
1596 1722
1597 rc = fcoe_if_create(netdev); 1723 fcoe = fcoe_interface_create(netdev);
1598 if (rc) { 1724 if (!fcoe) {
1725 rc = -ENOMEM;
1726 goto out_putdev;
1727 }
1728
1729 lport = fcoe_if_create(fcoe, &netdev->dev);
1730 if (IS_ERR(lport)) {
1599 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 1731 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
1600 netdev->name); 1732 netdev->name);
1601 fcoe_ethdrv_put(netdev);
1602 rc = -EIO; 1733 rc = -EIO;
1603 goto out_putdev; 1734 fcoe_interface_cleanup(fcoe);
1735 goto out_free;
1604 } 1736 }
1737
1738 /* Make this the "master" N_Port */
1739 fcoe->ctlr.lp = lport;
1740
1741 /* add to lports list */
1742 fcoe_hostlist_add(lport);
1743
1744 /* start FIP Discovery and FLOGI */
1745 lport->boot_time = jiffies;
1746 fc_fabric_login(lport);
1747 if (!fcoe_link_ok(lport))
1748 fcoe_ctlr_link_up(&fcoe->ctlr);
1749
1605 rc = 0; 1750 rc = 0;
1751out_free:
1752 /*
1753 * Release the init reference from fcoe_interface_create(); on
1754 * success the lport still holds a reference taken in fcoe_if_create().
1755 */
1756 fcoe_interface_put(fcoe);
1606out_putdev: 1757out_putdev:
1607 dev_put(netdev); 1758 dev_put(netdev);
1608out_nodev: 1759out_nodev:
1760 rtnl_unlock();
1761 mutex_unlock(&fcoe_config_mutex);
1609 return rc; 1762 return rc;
1610} 1763}
1611 1764
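The comment at out_free describes a deliberate reference dance: fcoe_interface_create() returns with one reference held for the creator, fcoe_if_create() takes its own on success, and fcoe_create() always drops the creator's reference before returning, leaving exactly one reference owned by the live lport. A toy refcount walk-through of that pairing (plain integers stand in for struct kref; the function names mirror the patch but the bodies are invented):

#include <stdio.h>

struct iface { int refs; };

static void get(struct iface *i) { i->refs++; }
static void put(struct iface *i)
{
	if (--i->refs == 0)
		printf("last ref gone: kfree(fcoe)\n");
}

int main(void)
{
	struct iface fcoe = { 1 };   /* ref held by fcoe_interface_create() */

	get(&fcoe);                  /* fcoe_if_create() success path */
	put(&fcoe);                  /* creator's release at out_free */
	printf("refs held by the live lport: %d\n", fcoe.refs);

	put(&fcoe);                  /* later: fcoe_if_destroy() drops it */
	return 0;
}

On the failure path the same put() is the one that frees the interface, which is why fcoe_interface_cleanup() runs before it.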
1612module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); 1765module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
1613__MODULE_PARM_TYPE(create, "string"); 1766__MODULE_PARM_TYPE(create, "string");
1614MODULE_PARM_DESC(create, "Create fcoe port using net device passed in."); 1767MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
1615module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); 1768module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
1616__MODULE_PARM_TYPE(destroy, "string"); 1769__MODULE_PARM_TYPE(destroy, "string");
1617MODULE_PARM_DESC(destroy, "Destroy fcoe port"); 1770MODULE_PARM_DESC(destroy, "Destroy fcoe port");
1618 1771
1619/** 1772/**
1620 * fcoe_link_ok() - Check if link is ok for the fc_lport 1773 * fcoe_link_ok() - Check if link is ok for the fc_lport
@@ -1632,37 +1785,40 @@ MODULE_PARM_DESC(destroy, "Destroy fcoe port");
1632 */ 1785 */
1633int fcoe_link_ok(struct fc_lport *lp) 1786int fcoe_link_ok(struct fc_lport *lp)
1634{ 1787{
1635 struct fcoe_softc *fc = lport_priv(lp); 1788 struct fcoe_port *port = lport_priv(lp);
1636 struct net_device *dev = fc->real_dev; 1789 struct net_device *dev = port->fcoe->netdev;
1637 struct ethtool_cmd ecmd = { ETHTOOL_GSET }; 1790 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1638 int rc = 0;
1639 1791
1640 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) { 1792 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
1641 dev = fc->phys_dev; 1793 (!dev_ethtool_get_settings(dev, &ecmd))) {
1642 if (dev->ethtool_ops->get_settings) { 1794 lp->link_supported_speeds &=
1643 dev->ethtool_ops->get_settings(dev, &ecmd); 1795 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1644 lp->link_supported_speeds &= 1796 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1645 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); 1797 SUPPORTED_1000baseT_Full))
1646 if (ecmd.supported & (SUPPORTED_1000baseT_Half | 1798 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1647 SUPPORTED_1000baseT_Full)) 1799 if (ecmd.supported & SUPPORTED_10000baseT_Full)
1648 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; 1800 lp->link_supported_speeds |=
1649 if (ecmd.supported & SUPPORTED_10000baseT_Full) 1801 FC_PORTSPEED_10GBIT;
1650 lp->link_supported_speeds |= 1802 if (ecmd.speed == SPEED_1000)
1651 FC_PORTSPEED_10GBIT; 1803 lp->link_speed = FC_PORTSPEED_1GBIT;
1652 if (ecmd.speed == SPEED_1000) 1804 if (ecmd.speed == SPEED_10000)
1653 lp->link_speed = FC_PORTSPEED_1GBIT; 1805 lp->link_speed = FC_PORTSPEED_10GBIT;
1654 if (ecmd.speed == SPEED_10000)
1655 lp->link_speed = FC_PORTSPEED_10GBIT;
1656 }
1657 } else
1658 rc = -1;
1659 1806
1660 return rc; 1807 return 0;
1808 }
1809 return -1;
1661} 1810}
1662 1811
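fcoe_link_ok() now folds the old nested ethtool query into a single dev_ethtool_get_settings() call and maps the advertised Ethernet capabilities onto FC port speeds. The mapping itself is just bit translation, sketched below with placeholder bit values (the real SUPPORTED_* and FC_PORTSPEED_* constants live in ethtool.h and scsi_transport_fc.h):

/* Bit positions here are invented for the demo; only the translation
 * pattern mirrors the patch. */
#include <stdio.h>

#define SUPPORTED_1000baseT_Half  (1 << 0)
#define SUPPORTED_1000baseT_Full  (1 << 1)
#define SUPPORTED_10000baseT_Full (1 << 2)
#define FC_PORTSPEED_1GBIT        (1 << 0)
#define FC_PORTSPEED_10GBIT       (1 << 1)

int main(void)
{
	unsigned int supported = SUPPORTED_1000baseT_Full |
				 SUPPORTED_10000baseT_Full;
	unsigned int speeds = 0;

	if (supported & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full))
		speeds |= FC_PORTSPEED_1GBIT;
	if (supported & SUPPORTED_10000baseT_Full)
		speeds |= FC_PORTSPEED_10GBIT;

	printf("link_supported_speeds = 0x%x\n", speeds);
	return 0;
}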
1663/** 1812/**
1664 * fcoe_percpu_clean() - Clear the pending skbs for an lport 1813 * fcoe_percpu_clean() - Clear the pending skbs for an lport
1665 * @lp: the fc_lport 1814 * @lp: the fc_lport
1815 *
1816 * Must be called with fcoe_config_mutex held to single-thread completion.
1817 *
1818 * This flushes the pending skbs by adding a new skb to each queue and
1819 * waiting until they are all freed. This assures us that not only are
1820 * there no packets that will be handled by the lport, but also that any
1821 * threads already handling packets have returned.
1666 */ 1822 */
1667void fcoe_percpu_clean(struct fc_lport *lp) 1823void fcoe_percpu_clean(struct fc_lport *lp)
1668{ 1824{
@@ -1687,7 +1843,25 @@ void fcoe_percpu_clean(struct fc_lport *lp)
1687 kfree_skb(skb); 1843 kfree_skb(skb);
1688 } 1844 }
1689 } 1845 }
1846
1847 if (!pp->thread || !cpu_online(cpu)) {
1848 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1849 continue;
1850 }
1851
1852 skb = dev_alloc_skb(0);
1853 if (!skb) {
1854 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1855 continue;
1856 }
1857 skb->destructor = fcoe_percpu_flush_done;
1858
1859 __skb_queue_tail(&pp->fcoe_rx_list, skb);
1860 if (pp->fcoe_rx_list.qlen == 1)
1861 wake_up_process(pp->thread);
1690 spin_unlock_bh(&pp->fcoe_rx_list.lock); 1862 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1863
1864 wait_for_completion(&fcoe_flush_completion);
1691 } 1865 }
1692} 1866}
1693 1867
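fcoe_percpu_clean() now does more than free queued frames: it appends a zero-length skb whose destructor is fcoe_percpu_flush_done() and blocks on fcoe_flush_completion, so when the wait returns the per-cpu thread has demonstrably consumed everything queued ahead of the sentinel. A userspace sketch of that handshake, with a pthread condition variable standing in for the kernel completion:

/* Sketch under heavy simplification: the rx queue is a fixed array and
 * 0 plays the sentinel skb; only the signalling shape is the point. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int flushed;

static void flush_done(void)   /* stands in for fcoe_percpu_flush_done() */
{
	pthread_mutex_lock(&lock);
	flushed = 1;
	pthread_cond_signal(&done);   /* complete(&fcoe_flush_completion) */
	pthread_mutex_unlock(&lock);
}

static void *rx_thread(void *arg)
{
	int queue[] = { 1, 2, 0 };    /* 0 is the sentinel "flush" skb */
	for (int i = 0; i < 3; i++) {
		if (queue[i] == 0)
			flush_done(); /* sentinel: no lport, just signal */
		else
			printf("handled frame %d\n", queue[i]);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, rx_thread, NULL);
	pthread_mutex_lock(&lock);
	while (!flushed)              /* wait_for_completion() */
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("per-cpu queue flushed\n");
	return 0;
}

This is also why the receive thread's NULL-lport path above checks skb->destructor before complaining: the sentinel deliberately has no lport.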
@@ -1699,16 +1873,16 @@ void fcoe_percpu_clean(struct fc_lport *lp)
1699 */ 1873 */
1700void fcoe_clean_pending_queue(struct fc_lport *lp) 1874void fcoe_clean_pending_queue(struct fc_lport *lp)
1701{ 1875{
1702 struct fcoe_softc *fc = lport_priv(lp); 1876 struct fcoe_port *port = lport_priv(lp);
1703 struct sk_buff *skb; 1877 struct sk_buff *skb;
1704 1878
1705 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1879 spin_lock_bh(&port->fcoe_pending_queue.lock);
1706 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { 1880 while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
1707 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1881 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1708 kfree_skb(skb); 1882 kfree_skb(skb);
1709 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1883 spin_lock_bh(&port->fcoe_pending_queue.lock);
1710 } 1884 }
1711 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1885 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1712} 1886}
1713 1887
1714/** 1888/**
@@ -1725,24 +1899,21 @@ int fcoe_reset(struct Scsi_Host *shost)
1725} 1899}
1726 1900
1727/** 1901/**
1728 * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device 1902 * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device
1729 * @dev: this is currently ptr to net_device 1903 * @dev: this is currently ptr to net_device
1730 * 1904 *
1731 * Returns: NULL or the located fcoe_softc 1905 * Returns: NULL or the located fcoe_interface
 1906 * Locking: must be called with the RTNL mutex held
1732 */ 1907 */
1733static struct fcoe_softc * 1908static struct fcoe_interface *
1734fcoe_hostlist_lookup_softc(const struct net_device *dev) 1909fcoe_hostlist_lookup_port(const struct net_device *dev)
1735{ 1910{
1736 struct fcoe_softc *fc; 1911 struct fcoe_interface *fcoe;
1737 1912
1738 read_lock(&fcoe_hostlist_lock); 1913 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1739 list_for_each_entry(fc, &fcoe_hostlist, list) { 1914 if (fcoe->netdev == dev)
1740 if (fc->real_dev == dev) { 1915 return fcoe;
1741 read_unlock(&fcoe_hostlist_lock);
1742 return fc;
1743 }
1744 } 1916 }
1745 read_unlock(&fcoe_hostlist_lock);
1746 return NULL; 1917 return NULL;
1747} 1918}
1748 1919
@@ -1751,14 +1922,14 @@ fcoe_hostlist_lookup_softc(const struct net_device *dev)
1751 * @netdev: ptr to net_device 1922 * @netdev: ptr to net_device
1752 * 1923 *
1753 * Returns: 0 for success 1924 * Returns: NULL or the located fc_lport
1925 * Locking: must be called with the RTNL mutex held
1754 */ 1926 */
1755struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) 1927static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1756{ 1928{
1757 struct fcoe_softc *fc; 1929 struct fcoe_interface *fcoe;
1758
1759 fc = fcoe_hostlist_lookup_softc(netdev);
1760 1930
1761 return (fc) ? fc->ctlr.lp : NULL; 1931 fcoe = fcoe_hostlist_lookup_port(netdev);
1932 return (fcoe) ? fcoe->ctlr.lp : NULL;
1762} 1933}
1763 1934
1764/** 1935/**
@@ -1766,41 +1937,23 @@ struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1766 * @lp: ptr to the fc_lport to be added 1937 * @lp: ptr to the fc_lport to be added
1767 * 1938 *
1768 * Returns: 0 for success 1939 * Returns: 0 for success
1940 * Locking: must be called with the RTNL mutex held
1769 */ 1941 */
1770int fcoe_hostlist_add(const struct fc_lport *lp) 1942static int fcoe_hostlist_add(const struct fc_lport *lport)
1771{ 1943{
1772 struct fcoe_softc *fc; 1944 struct fcoe_interface *fcoe;
1773 1945 struct fcoe_port *port;
1774 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); 1946
1775 if (!fc) { 1947 fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
1776 fc = lport_priv(lp); 1948 if (!fcoe) {
1777 write_lock_bh(&fcoe_hostlist_lock); 1949 port = lport_priv(lport);
1778 list_add_tail(&fc->list, &fcoe_hostlist); 1950 fcoe = port->fcoe;
1779 write_unlock_bh(&fcoe_hostlist_lock); 1951 list_add_tail(&fcoe->list, &fcoe_hostlist);
1780 } 1952 }
1781 return 0; 1953 return 0;
1782} 1954}
1783 1955
1784/** 1956/**
1785 * fcoe_hostlist_remove() - remove a lport from lports list
1786 * @lp: ptr to the fc_lport to be removed
1787 *
1788 * Returns: 0 for success
1789 */
1790int fcoe_hostlist_remove(const struct fc_lport *lp)
1791{
1792 struct fcoe_softc *fc;
1793
1794 fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1795 BUG_ON(!fc);
1796 write_lock_bh(&fcoe_hostlist_lock);
1797 list_del(&fc->list);
1798 write_unlock_bh(&fcoe_hostlist_lock);
1799
1800 return 0;
1801}
1802
1803/**
1804 * fcoe_init() - fcoe module loading initialization 1957 * fcoe_init() - fcoe module loading initialization
1805 * 1958 *
1806 * Returns 0 on success, negative on failure 1959 * Returns 0 on success, negative on failure
@@ -1811,8 +1964,7 @@ static int __init fcoe_init(void)
1811 int rc = 0; 1964 int rc = 0;
1812 struct fcoe_percpu_s *p; 1965 struct fcoe_percpu_s *p;
1813 1966
1814 INIT_LIST_HEAD(&fcoe_hostlist); 1967 mutex_lock(&fcoe_config_mutex);
1815 rwlock_init(&fcoe_hostlist_lock);
1816 1968
1817 for_each_possible_cpu(cpu) { 1969 for_each_possible_cpu(cpu) {
1818 p = &per_cpu(fcoe_percpu, cpu); 1970 p = &per_cpu(fcoe_percpu, cpu);
@@ -1830,15 +1982,18 @@ static int __init fcoe_init(void)
1830 /* Setup link change notification */ 1982 /* Setup link change notification */
1831 fcoe_dev_setup(); 1983 fcoe_dev_setup();
1832 1984
1833 fcoe_if_init(); 1985 rc = fcoe_if_init();
1986 if (rc)
1987 goto out_free;
1834 1988
1989 mutex_unlock(&fcoe_config_mutex);
1835 return 0; 1990 return 0;
1836 1991
1837out_free: 1992out_free:
1838 for_each_online_cpu(cpu) { 1993 for_each_online_cpu(cpu) {
1839 fcoe_percpu_thread_destroy(cpu); 1994 fcoe_percpu_thread_destroy(cpu);
1840 } 1995 }
1841 1996 mutex_unlock(&fcoe_config_mutex);
1842 return rc; 1997 return rc;
1843} 1998}
1844module_init(fcoe_init); 1999module_init(fcoe_init);
@@ -1851,21 +2006,36 @@ module_init(fcoe_init);
1851static void __exit fcoe_exit(void) 2006static void __exit fcoe_exit(void)
1852{ 2007{
1853 unsigned int cpu; 2008 unsigned int cpu;
1854 struct fcoe_softc *fc, *tmp; 2009 struct fcoe_interface *fcoe, *tmp;
2010 struct fcoe_port *port;
2011
2012 mutex_lock(&fcoe_config_mutex);
1855 2013
1856 fcoe_dev_cleanup(); 2014 fcoe_dev_cleanup();
1857 2015
1858 /* releases the associated fcoe hosts */ 2016 /* releases the associated fcoe hosts */
1859 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) 2017 rtnl_lock();
1860 fcoe_if_destroy(fc->real_dev); 2018 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2019 list_del(&fcoe->list);
2020 port = lport_priv(fcoe->ctlr.lp);
2021 fcoe_interface_cleanup(fcoe);
2022 schedule_work(&port->destroy_work);
2023 }
2024 rtnl_unlock();
1861 2025
1862 unregister_hotcpu_notifier(&fcoe_cpu_notifier); 2026 unregister_hotcpu_notifier(&fcoe_cpu_notifier);
1863 2027
1864 for_each_online_cpu(cpu) { 2028 for_each_online_cpu(cpu)
1865 fcoe_percpu_thread_destroy(cpu); 2029 fcoe_percpu_thread_destroy(cpu);
1866 }
1867 2030
1868 /* detach from scsi transport */ 2031 mutex_unlock(&fcoe_config_mutex);
2032
 2033 /* flush any asynchronous interface destroys,
2034 * this should happen after the netdev notifier is unregistered */
2035 flush_scheduled_work();
2036
2037 /* detach from scsi transport
 2038 * must happen after all destroys are done, therefore after the flush */
1869 fcoe_if_exit(); 2039 fcoe_if_exit();
1870} 2040}
1871module_exit(fcoe_exit); 2041module_exit(fcoe_exit);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 0d724fa0898f..ce7f60fb1bc0 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -37,8 +37,8 @@
37 37
38#define FCOE_MAX_OUTSTANDING_COMMANDS 1024 38#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
39 39
40#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */ 40#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
41#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */ 41#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
42 42
43unsigned int fcoe_debug_logging; 43unsigned int fcoe_debug_logging;
44module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); 44module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
@@ -53,7 +53,7 @@ do { \
53 do { \ 53 do { \
54 CMD; \ 54 CMD; \
55 } while (0); \ 55 } while (0); \
56} while (0); 56} while (0)
57 57
58#define FCOE_DBG(fmt, args...) \ 58#define FCOE_DBG(fmt, args...) \
59 FCOE_CHECK_LOGGING(FCOE_LOGGING, \ 59 FCOE_CHECK_LOGGING(FCOE_LOGGING, \
@@ -61,7 +61,7 @@ do { \
61 61
62#define FCOE_NETDEV_DBG(netdev, fmt, args...) \ 62#define FCOE_NETDEV_DBG(netdev, fmt, args...) \
63 FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \ 63 FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \
64 printk(KERN_INFO "fcoe: %s" fmt, \ 64 printk(KERN_INFO "fcoe: %s: " fmt, \
65 netdev->name, ##args);) 65 netdev->name, ##args);)
66 66
67/* 67/*
@@ -75,26 +75,36 @@ struct fcoe_percpu_s {
75}; 75};
76 76
77/* 77/*
78 * the fcoe sw transport private data 78 * an FCoE interface, 1:1 with netdev
79 */ 79 */
80struct fcoe_softc { 80struct fcoe_interface {
81 struct list_head list; 81 struct list_head list;
82 struct net_device *real_dev; 82 struct net_device *netdev;
83 struct net_device *phys_dev; /* device with ethtool_ops */
84 struct packet_type fcoe_packet_type; 83 struct packet_type fcoe_packet_type;
85 struct packet_type fip_packet_type; 84 struct packet_type fip_packet_type;
85 struct fcoe_ctlr ctlr;
86 struct fc_exch_mgr *oem; /* offload exchange manager */
87 struct kref kref;
88};
89
90/*
91 * the FCoE private structure that's allocated along with the
92 * Scsi_Host and libfc fc_lport structures
93 */
94struct fcoe_port {
95 struct fcoe_interface *fcoe;
96 struct fc_lport *lport;
86 struct sk_buff_head fcoe_pending_queue; 97 struct sk_buff_head fcoe_pending_queue;
87 u8 fcoe_pending_queue_active; 98 u8 fcoe_pending_queue_active;
88 struct timer_list timer; /* queue timer */ 99 struct timer_list timer; /* queue timer */
89 struct fcoe_ctlr ctlr; 100 struct work_struct destroy_work; /* to prevent rtnl deadlocks */
90}; 101};
91 102
92#define fcoe_from_ctlr(fc) container_of(fc, struct fcoe_softc, ctlr) 103#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
93 104
94static inline struct net_device *fcoe_netdev( 105static inline struct net_device *fcoe_netdev(const struct fc_lport *lp)
95 const struct fc_lport *lp)
96{ 106{
97 return ((struct fcoe_softc *)lport_priv(lp))->real_dev; 107 return ((struct fcoe_port *)lport_priv(lp))->fcoe->netdev;
98} 108}
99 109
100#endif /* _FCOE_H_ */ 110#endif /* _FCOE_H_ */
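The header change is the heart of the series: the old fcoe_softc is split into a per-netdev fcoe_interface (packet types, FIP controller, offload exchange manager, kref) and a per-lport fcoe_port (transmit queue, timer, destroy_work), so several lports — NPIV ports later on — can share one interface. A compile-time sketch of the resulting navigation path, trimmed to the fcoe_netdev() accessor; the member sets are reduced and the refcount is a plain int rather than a real struct kref:

#include <stdio.h>

struct net_device { char name[16]; };

struct fcoe_interface {
	struct net_device *netdev;   /* 1:1 with the Ethernet device */
	int refcount;                /* stands in for struct kref */
};

struct fcoe_port {
	struct fcoe_interface *fcoe; /* back-pointer shared by all ports */
};

static struct net_device *fcoe_netdev(struct fcoe_port *port)
{
	return port->fcoe->netdev;   /* was softc->real_dev before the split */
}

int main(void)
{
	struct net_device eth = { "eth3" };
	struct fcoe_interface iface = { &eth, 1 };
	struct fcoe_port port = { &iface };

	printf("lport transmits on %s\n", fcoe_netdev(&port)->name);
	return 0;
}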
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index f544340d318b..62a4c2026072 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -69,7 +69,7 @@ do { \
69 do { \ 69 do { \
70 CMD; \ 70 CMD; \
71 } while (0); \ 71 } while (0); \
72} while (0); 72} while (0)
73 73
74#define LIBFCOE_DBG(fmt, args...) \ 74#define LIBFCOE_DBG(fmt, args...) \
75 LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ 75 LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
@@ -148,13 +148,17 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
148 */ 148 */
149void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) 149void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
150{ 150{
151 flush_work(&fip->recv_work); 151 cancel_work_sync(&fip->recv_work);
152 spin_lock_bh(&fip->fip_recv_list.lock);
153 __skb_queue_purge(&fip->fip_recv_list);
154 spin_unlock_bh(&fip->fip_recv_list.lock);
155
152 spin_lock_bh(&fip->lock); 156 spin_lock_bh(&fip->lock);
153 fip->state = FIP_ST_DISABLED; 157 fip->state = FIP_ST_DISABLED;
154 fcoe_ctlr_reset_fcfs(fip); 158 fcoe_ctlr_reset_fcfs(fip);
155 spin_unlock_bh(&fip->lock); 159 spin_unlock_bh(&fip->lock);
156 del_timer_sync(&fip->timer); 160 del_timer_sync(&fip->timer);
157 flush_work(&fip->link_work); 161 cancel_work_sync(&fip->link_work);
158} 162}
159EXPORT_SYMBOL(fcoe_ctlr_destroy); 163EXPORT_SYMBOL(fcoe_ctlr_destroy);
160 164
@@ -413,10 +417,18 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
413 struct fip_mac_desc *mac; 417 struct fip_mac_desc *mac;
414 struct fcoe_fcf *fcf; 418 struct fcoe_fcf *fcf;
415 size_t dlen; 419 size_t dlen;
420 u16 fip_flags;
416 421
417 fcf = fip->sel_fcf; 422 fcf = fip->sel_fcf;
418 if (!fcf) 423 if (!fcf)
419 return -ENODEV; 424 return -ENODEV;
425
426 /* set flags according to both FCF and lport's capability on SPMA */
427 fip_flags = fcf->flags;
428 fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA : FIP_FL_FPMA;
429 if (!fip_flags)
430 return -ENODEV;
431
420 dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */ 432 dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */
421 cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap)); 433 cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap));
422 434
@@ -429,9 +441,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
429 cap->fip.fip_op = htons(FIP_OP_LS); 441 cap->fip.fip_op = htons(FIP_OP_LS);
430 cap->fip.fip_subcode = FIP_SC_REQ; 442 cap->fip.fip_subcode = FIP_SC_REQ;
431 cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW); 443 cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW);
432 cap->fip.fip_flags = htons(FIP_FL_FPMA); 444 cap->fip.fip_flags = htons(fip_flags);
433 if (fip->spma)
434 cap->fip.fip_flags |= htons(FIP_FL_SPMA);
435 445
436 cap->encaps.fd_desc.fip_dtype = dtype; 446 cap->encaps.fd_desc.fip_dtype = dtype;
437 cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW; 447 cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
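The libfcoe change above replaces the hard-coded FPMA flag with a negotiation: the flags advertised by the selected FCF are intersected with what the local port supports, and an empty intersection aborts the ELS with -ENODEV. A sketch of that intersection, using the FIP_FL_FPMA/FIP_FL_SPMA bit positions from fc_fip.h as I recall them (treat the values as assumptions):

#include <stdio.h>

#define FIP_FL_FPMA 0x8000   /* fabric-provided MAC supported (assumed) */
#define FIP_FL_SPMA 0x4000   /* server-provided MAC supported (assumed) */

int main(void)
{
	unsigned short fcf_flags = FIP_FL_FPMA | FIP_FL_SPMA; /* FCF advert */
	int spma = 0;             /* local port can only do FPMA */
	unsigned short fip_flags;

	fip_flags = fcf_flags;
	fip_flags &= spma ? (FIP_FL_SPMA | FIP_FL_FPMA) : FIP_FL_FPMA;
	if (!fip_flags)
		printf("no common addressing mode: -ENODEV\n");
	else
		printf("fip_flags = 0x%04x\n", fip_flags);
	return 0;
}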
@@ -879,7 +889,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
879 stats->RxFrames++; 889 stats->RxFrames++;
880 stats->RxWords += skb->len / FIP_BPW; 890 stats->RxWords += skb->len / FIP_BPW;
881 891
882 fc_exch_recv(lp, lp->emp, fp); 892 fc_exch_recv(lp, fp);
883 return; 893 return;
884 894
885len_err: 895len_err:
@@ -1104,7 +1114,6 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1104 struct fcoe_fcf *sel; 1114 struct fcoe_fcf *sel;
1105 struct fcoe_fcf *fcf; 1115 struct fcoe_fcf *fcf;
1106 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); 1116 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
1107 DECLARE_MAC_BUF(buf);
1108 u8 send_ctlr_ka; 1117 u8 send_ctlr_ka;
1109 u8 send_port_ka; 1118 u8 send_port_ka;
1110 1119
@@ -1128,9 +1137,8 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1128 fcf = sel; /* the old FCF may have been freed */ 1137 fcf = sel; /* the old FCF may have been freed */
1129 if (sel) { 1138 if (sel) {
1130 printk(KERN_INFO "libfcoe: host%d: FIP selected " 1139 printk(KERN_INFO "libfcoe: host%d: FIP selected "
1131 "Fibre-Channel Forwarder MAC %s\n", 1140 "Fibre-Channel Forwarder MAC %pM\n",
1132 fip->lp->host->host_no, 1141 fip->lp->host->host_no, sel->fcf_mac);
1133 print_mac(buf, sel->fcf_mac));
1134 memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); 1142 memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
1135 fip->port_ka_time = jiffies + 1143 fip->port_ka_time = jiffies +
1136 msecs_to_jiffies(FIP_VN_KA_PERIOD); 1144 msecs_to_jiffies(FIP_VN_KA_PERIOD);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 07e6eedb83ce..50db3e36a619 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -115,7 +115,7 @@ void fnic_handle_frame(struct work_struct *work)
115 } 115 }
116 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 116 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
117 117
118 fc_exch_recv(lp, lp->emp, fp); 118 fc_exch_recv(lp, fp);
119 } 119 }
120 120
121} 121}
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 2c266c01dc5a..71c7bbe26d05 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -671,14 +671,6 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
671 lp->link_up = 0; 671 lp->link_up = 0;
672 lp->tt = fnic_transport_template; 672 lp->tt = fnic_transport_template;
673 673
674 lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
675 FCPIO_HOST_EXCH_RANGE_START,
676 FCPIO_HOST_EXCH_RANGE_END);
677 if (!lp->emp) {
678 err = -ENOMEM;
679 goto err_out_remove_scsi_host;
680 }
681
682 lp->max_retry_count = fnic->config.flogi_retries; 674 lp->max_retry_count = fnic->config.flogi_retries;
683 lp->max_rport_retry_count = fnic->config.plogi_retries; 675 lp->max_rport_retry_count = fnic->config.plogi_retries;
684 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 676 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@@ -693,12 +685,18 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
693 fc_set_wwnn(lp, fnic->config.node_wwn); 685 fc_set_wwnn(lp, fnic->config.node_wwn);
694 fc_set_wwpn(lp, fnic->config.port_wwn); 686 fc_set_wwpn(lp, fnic->config.port_wwn);
695 687
696 fc_exch_init(lp);
697 fc_lport_init(lp); 688 fc_lport_init(lp);
689 fc_exch_init(lp);
698 fc_elsct_init(lp); 690 fc_elsct_init(lp);
699 fc_rport_init(lp); 691 fc_rport_init(lp);
700 fc_disc_init(lp); 692 fc_disc_init(lp);
701 693
694 if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
695 FCPIO_HOST_EXCH_RANGE_END, NULL)) {
696 err = -ENOMEM;
697 goto err_out_remove_scsi_host;
698 }
699
702 fc_lport_config(lp); 700 fc_lport_config(lp);
703 701
704 if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + 702 if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
@@ -738,7 +736,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
738 return 0; 736 return 0;
739 737
740err_out_free_exch_mgr: 738err_out_free_exch_mgr:
741 fc_exch_mgr_free(lp->emp); 739 fc_exch_mgr_free(lp);
742err_out_remove_scsi_host: 740err_out_remove_scsi_host:
743 fc_remove_host(fnic->lport->host); 741 fc_remove_host(fnic->lport->host);
744 scsi_remove_host(fnic->lport->host); 742 scsi_remove_host(fnic->lport->host);
@@ -827,7 +825,7 @@ static void __devexit fnic_remove(struct pci_dev *pdev)
827 825
828 fc_remove_host(fnic->lport->host); 826 fc_remove_host(fnic->lport->host);
829 scsi_remove_host(fnic->lport->host); 827 scsi_remove_host(fnic->lport->host);
830 fc_exch_mgr_free(fnic->lport->emp); 828 fc_exch_mgr_free(fnic->lport);
831 vnic_dev_notify_unset(fnic->vdev); 829 vnic_dev_notify_unset(fnic->vdev);
832 fnic_free_vnic_resources(fnic); 830 fnic_free_vnic_resources(fnic);
833 fnic_free_intr(fnic); 831 fnic_free_intr(fnic);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 166d96450a0e..bb2c696c006a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4217,7 +4217,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
4217 if (!vhost->trace) 4217 if (!vhost->trace)
4218 goto free_disc_buffer; 4218 goto free_disc_buffer;
4219 4219
4220 vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ, 4220 vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
4221 sizeof(struct ibmvfc_target)); 4221 sizeof(struct ibmvfc_target));
4222 4222
4223 if (!vhost->tgt_pool) { 4223 if (!vhost->tgt_pool) {
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4b63dd6b1c81..163245a1c3e5 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1199,7 +1199,7 @@ struct ipr_ioa_cfg {
1199 1199
1200 struct ata_host ata_host; 1200 struct ata_host ata_host;
1201 char ipr_cmd_label[8]; 1201 char ipr_cmd_label[8];
1202#define IPR_CMD_LABEL "ipr_cmnd" 1202#define IPR_CMD_LABEL "ipr_cmd"
1203 struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; 1203 struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
1204 u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; 1204 u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
1205}; 1205};
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 518dbd91df85..2b1b834a098b 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -99,6 +99,27 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
99 return total_consumed; 99 return total_consumed;
100} 100}
101 101
102/**
103 * iscsi_sw_sk_state_check - check socket state
104 * @sk: socket
105 *
 106 * If the socket is in CLOSE or CLOSE_WAIT, we should
 107 * not fail the connection while there is still some
 108 * receive data pending.
109 */
110static inline int iscsi_sw_sk_state_check(struct sock *sk)
111{
112 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
113
114 if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
115 !atomic_read(&sk->sk_rmem_alloc)) {
116 ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
117 iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
118 return -ECONNRESET;
119 }
120 return 0;
121}
122
102static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) 123static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
103{ 124{
104 struct iscsi_conn *conn = sk->sk_user_data; 125 struct iscsi_conn *conn = sk->sk_user_data;
@@ -117,6 +138,8 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
117 rd_desc.count = 1; 138 rd_desc.count = 1;
118 tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv); 139 tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
119 140
141 iscsi_sw_sk_state_check(sk);
142
120 read_unlock(&sk->sk_callback_lock); 143 read_unlock(&sk->sk_callback_lock);
121 144
122 /* If we had to (atomically) map a highmem page, 145 /* If we had to (atomically) map a highmem page,
@@ -137,13 +160,7 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
137 conn = (struct iscsi_conn*)sk->sk_user_data; 160 conn = (struct iscsi_conn*)sk->sk_user_data;
138 session = conn->session; 161 session = conn->session;
139 162
140 if ((sk->sk_state == TCP_CLOSE_WAIT || 163 iscsi_sw_sk_state_check(sk);
141 sk->sk_state == TCP_CLOSE) &&
142 !atomic_read(&sk->sk_rmem_alloc)) {
143 ISCSI_SW_TCP_DBG(conn, "iscsi_tcp_state_change: "
144 "TCP_CLOSE|TCP_CLOSE_WAIT\n");
145 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
146 }
147 164
148 tcp_conn = conn->dd_data; 165 tcp_conn = conn->dd_data;
149 tcp_sw_conn = tcp_conn->dd_data; 166 tcp_sw_conn = tcp_conn->dd_data;
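The iscsi_tcp change deduplicates the close-detection logic into iscsi_sw_sk_state_check() and, notably, also runs it from the data-ready path, catching sockets whose receive queue drained inside tcp_read_sock() and would otherwise linger. A userspace model of the check's two-condition gate (the struct and return codes are simplified stand-ins):

#include <stdio.h>

enum tcp_state { TCP_ESTABLISHED, TCP_CLOSE_WAIT, TCP_CLOSE };

struct sock_model {
	enum tcp_state state;
	int rmem_alloc;   /* bytes still queued for the reader */
};

static int sk_state_check(struct sock_model *sk)
{
	if ((sk->state == TCP_CLOSE_WAIT || sk->state == TCP_CLOSE) &&
	    !sk->rmem_alloc) {
		printf("conn failure: TCP_CLOSE|TCP_CLOSE_WAIT\n");
		return -1;   /* -ECONNRESET in the driver */
	}
	return 0;
}

int main(void)
{
	struct sock_model draining = { TCP_CLOSE_WAIT, 4096 };
	struct sock_model dead = { TCP_CLOSE, 0 };

	printf("draining: %d\n", sk_state_check(&draining)); /* keeps conn */
	printf("dead:     %d\n", sk_state_check(&dead));     /* fails conn */
	return 0;
}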
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 6fabf66972b9..c48799e9dd8e 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -43,47 +43,14 @@
43#define FC_DISC_RETRY_LIMIT 3 /* max retries */ 43#define FC_DISC_RETRY_LIMIT 3 /* max retries */
44#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */ 44#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
45 45
46#define FC_DISC_DELAY 3
47
48static void fc_disc_gpn_ft_req(struct fc_disc *); 46static void fc_disc_gpn_ft_req(struct fc_disc *);
49static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *); 47static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
50static int fc_disc_new_target(struct fc_disc *, struct fc_rport *, 48static void fc_disc_done(struct fc_disc *, enum fc_disc_event);
51 struct fc_rport_identifiers *);
52static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
53static void fc_disc_done(struct fc_disc *);
54static void fc_disc_timeout(struct work_struct *); 49static void fc_disc_timeout(struct work_struct *);
55static void fc_disc_single(struct fc_disc *, struct fc_disc_port *); 50static int fc_disc_single(struct fc_lport *, struct fc_disc_port *);
56static void fc_disc_restart(struct fc_disc *); 51static void fc_disc_restart(struct fc_disc *);
57 52
58/** 53/**
59 * fc_disc_lookup_rport() - lookup a remote port by port_id
60 * @lport: Fibre Channel host port instance
61 * @port_id: remote port port_id to match
62 */
63struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
64 u32 port_id)
65{
66 const struct fc_disc *disc = &lport->disc;
67 struct fc_rport *rport, *found = NULL;
68 struct fc_rport_libfc_priv *rdata;
69 int disc_found = 0;
70
71 list_for_each_entry(rdata, &disc->rports, peers) {
72 rport = PRIV_TO_RPORT(rdata);
73 if (rport->port_id == port_id) {
74 disc_found = 1;
75 found = rport;
76 break;
77 }
78 }
79
80 if (!disc_found)
81 found = NULL;
82
83 return found;
84}
85
86/**
87 * fc_disc_stop_rports() - delete all the remote ports associated with the lport 54 * fc_disc_stop_rports() - delete all the remote ports associated with the lport
88 * @disc: The discovery job to stop rports on 55 * @disc: The discovery job to stop rports on
89 * 56 *
@@ -93,70 +60,17 @@ struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
93void fc_disc_stop_rports(struct fc_disc *disc) 60void fc_disc_stop_rports(struct fc_disc *disc)
94{ 61{
95 struct fc_lport *lport; 62 struct fc_lport *lport;
96 struct fc_rport *rport; 63 struct fc_rport_priv *rdata, *next;
97 struct fc_rport_libfc_priv *rdata, *next;
98 64
99 lport = disc->lport; 65 lport = disc->lport;
100 66
101 mutex_lock(&disc->disc_mutex); 67 mutex_lock(&disc->disc_mutex);
102 list_for_each_entry_safe(rdata, next, &disc->rports, peers) { 68 list_for_each_entry_safe(rdata, next, &disc->rports, peers)
103 rport = PRIV_TO_RPORT(rdata); 69 lport->tt.rport_logoff(rdata);
104 list_del(&rdata->peers);
105 lport->tt.rport_logoff(rport);
106 }
107
108 list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) {
109 rport = PRIV_TO_RPORT(rdata);
110 lport->tt.rport_logoff(rport);
111 }
112
113 mutex_unlock(&disc->disc_mutex); 70 mutex_unlock(&disc->disc_mutex);
114} 71}
115 72
116/** 73/**
117 * fc_disc_rport_callback() - Event handler for rport events
118 * @lport: The lport which is receiving the event
119 * @rport: The rport which the event has occured on
120 * @event: The event that occured
121 *
122 * Locking Note: The rport lock should not be held when calling
123 * this function.
124 */
125static void fc_disc_rport_callback(struct fc_lport *lport,
126 struct fc_rport *rport,
127 enum fc_rport_event event)
128{
129 struct fc_rport_libfc_priv *rdata = rport->dd_data;
130 struct fc_disc *disc = &lport->disc;
131
132 FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
133 rport->port_id);
134
135 switch (event) {
136 case RPORT_EV_CREATED:
137 if (disc) {
138 mutex_lock(&disc->disc_mutex);
139 list_add_tail(&rdata->peers, &disc->rports);
140 mutex_unlock(&disc->disc_mutex);
141 }
142 break;
143 case RPORT_EV_LOGO:
144 case RPORT_EV_FAILED:
145 case RPORT_EV_STOP:
146 mutex_lock(&disc->disc_mutex);
147 mutex_lock(&rdata->rp_mutex);
148 if (rdata->trans_state == FC_PORTSTATE_ROGUE)
149 list_del(&rdata->peers);
150 mutex_unlock(&rdata->rp_mutex);
151 mutex_unlock(&disc->disc_mutex);
152 break;
153 default:
154 break;
155 }
156
157}
158
159/**
160 * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) 74 * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
161 * @sp: Current sequence of the RSCN exchange 75 * @sp: Current sequence of the RSCN exchange
162 * @fp: RSCN Frame 76 * @fp: RSCN Frame
@@ -169,8 +83,6 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
169 struct fc_disc *disc) 83 struct fc_disc *disc)
170{ 84{
171 struct fc_lport *lport; 85 struct fc_lport *lport;
172 struct fc_rport *rport;
173 struct fc_rport_libfc_priv *rdata;
174 struct fc_els_rscn *rp; 86 struct fc_els_rscn *rp;
175 struct fc_els_rscn_page *pp; 87 struct fc_els_rscn_page *pp;
176 struct fc_seq_els_data rjt_data; 88 struct fc_seq_els_data rjt_data;
@@ -224,10 +136,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
224 break; 136 break;
225 } 137 }
226 dp->lp = lport; 138 dp->lp = lport;
227 dp->ids.port_id = ntoh24(pp->rscn_fid); 139 dp->port_id = ntoh24(pp->rscn_fid);
228 dp->ids.port_name = -1;
229 dp->ids.node_name = -1;
230 dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
231 list_add_tail(&dp->peers, &disc_ports); 140 list_add_tail(&dp->peers, &disc_ports);
232 break; 141 break;
233 case ELS_ADDR_FMT_AREA: 142 case ELS_ADDR_FMT_AREA:
@@ -240,6 +149,19 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
240 } 149 }
241 } 150 }
242 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 151 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
152
153 /*
154 * If not doing a complete rediscovery, do GPN_ID on
155 * the individual ports mentioned in the list.
156 * If any of these get an error, do a full rediscovery.
157 * In any case, go through the list and free the entries.
158 */
159 list_for_each_entry_safe(dp, next, &disc_ports, peers) {
160 list_del(&dp->peers);
161 if (!redisc)
162 redisc = fc_disc_single(lport, dp);
163 kfree(dp);
164 }
243 if (redisc) { 165 if (redisc) {
244 FC_DISC_DBG(disc, "RSCN received: rediscovering\n"); 166 FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
245 fc_disc_restart(disc); 167 fc_disc_restart(disc);
@@ -247,16 +169,6 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
247 FC_DISC_DBG(disc, "RSCN received: not rediscovering. " 169 FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
248 "redisc %d state %d in_prog %d\n", 170 "redisc %d state %d in_prog %d\n",
249 redisc, lport->state, disc->pending); 171 redisc, lport->state, disc->pending);
250 list_for_each_entry_safe(dp, next, &disc_ports, peers) {
251 list_del(&dp->peers);
252 rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
253 if (rport) {
254 rdata = rport->dd_data;
255 list_del(&rdata->peers);
256 lport->tt.rport_logoff(rport);
257 }
258 fc_disc_single(disc, dp);
259 }
260 } 172 }
261 fc_frame_free(fp); 173 fc_frame_free(fp);
262 return; 174 return;
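The restructured RSCN path above frees each queued fc_disc_port entry exactly once and only falls back to a full GPN_FT rediscovery when an individual GPN_ID probe cannot be started. A compact sketch of that escalation loop, with fc_disc_single() stubbed to fail on one port id (the ids are invented):

#include <stdio.h>

/* stand-in for fc_disc_single(): nonzero means the GPN_ID could not
 * be sent and a full rediscovery is required */
static int disc_single_stub(unsigned int port_id)
{
	return port_id == 0x7f0200;   /* pretend this one fails */
}

int main(void)
{
	unsigned int disc_ports[] = { 0x7f0100, 0x7f0200, 0x7f0300 };
	int redisc = 0;

	for (unsigned int i = 0; i < 3; i++) {
		if (!redisc)
			redisc = disc_single_stub(disc_ports[i]);
		/* entry is always freed, probed or not */
	}
	printf(redisc ? "falling back to GPN_FT rediscovery\n"
		      : "per-port probes issued\n");
	return 0;
}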
@@ -308,35 +220,34 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
308 */ 220 */
309static void fc_disc_restart(struct fc_disc *disc) 221static void fc_disc_restart(struct fc_disc *disc)
310{ 222{
311 struct fc_rport *rport; 223 if (!disc->disc_callback)
312 struct fc_rport_libfc_priv *rdata, *next; 224 return;
313 struct fc_lport *lport = disc->lport;
314 225
315 FC_DISC_DBG(disc, "Restarting discovery\n"); 226 FC_DISC_DBG(disc, "Restarting discovery\n");
316 227
317 list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
318 rport = PRIV_TO_RPORT(rdata);
319 list_del(&rdata->peers);
320 lport->tt.rport_logoff(rport);
321 }
322
323 disc->requested = 1; 228 disc->requested = 1;
324 if (!disc->pending) 229 if (disc->pending)
325 fc_disc_gpn_ft_req(disc); 230 return;
231
232 /*
233 * Advance disc_id. This is an arbitrary non-zero number that will
234 * match the value in the fc_rport_priv after discovery for all
235 * freshly-discovered remote ports. Avoid wrapping to zero.
236 */
237 disc->disc_id = (disc->disc_id + 2) | 1;
238 disc->retry_count = 0;
239 fc_disc_gpn_ft_req(disc);
326} 240}
327 241
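fc_disc_restart() now stamps each pass with a disc_id advanced by `(disc_id + 2) | 1`; forcing the low bit keeps the id odd, so it can never wrap to the zero value that marks an rport as not yet seen by the current pass. The arithmetic in isolation, assuming a 16-bit counter:

#include <stdio.h>

int main(void)
{
	unsigned short disc_id = 0xffff;   /* worst case: about to wrap */

	for (int i = 0; i < 3; i++) {
		disc_id = (disc_id + 2) | 1;
		printf("disc_id = 0x%04x\n", disc_id);  /* never 0 */
	}
	return 0;
}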
328/** 242/**
329 * fc_disc_start() - Fibre Channel Target discovery 243 * fc_disc_start() - Fibre Channel Target discovery
330 * @lport: FC local port 244 * @lport: FC local port
331 * 245 * @disc_callback: function to be called when discovery is complete
332 * Returns non-zero if discovery cannot be started.
333 */ 246 */
334static void fc_disc_start(void (*disc_callback)(struct fc_lport *, 247static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
335 enum fc_disc_event), 248 enum fc_disc_event),
336 struct fc_lport *lport) 249 struct fc_lport *lport)
337{ 250{
338 struct fc_rport *rport;
339 struct fc_rport_identifiers ids;
340 struct fc_disc *disc = &lport->disc; 251 struct fc_disc *disc = &lport->disc;
341 252
342 /* 253 /*
@@ -345,145 +256,47 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
345 * and send the GPN_FT request. 256 * and send the GPN_FT request.
346 */ 257 */
347 mutex_lock(&disc->disc_mutex); 258 mutex_lock(&disc->disc_mutex);
348
349 disc->disc_callback = disc_callback; 259 disc->disc_callback = disc_callback;
350 260 fc_disc_restart(disc);
351 /*
352 * If not ready, or already running discovery, just set request flag.
353 */
354 disc->requested = 1;
355
356 if (disc->pending) {
357 mutex_unlock(&disc->disc_mutex);
358 return;
359 }
360
361 /*
362 * Handle point-to-point mode as a simple discovery
363 * of the remote port. Yucky, yucky, yuck, yuck!
364 */
365 rport = disc->lport->ptp_rp;
366 if (rport) {
367 ids.port_id = rport->port_id;
368 ids.port_name = rport->port_name;
369 ids.node_name = rport->node_name;
370 ids.roles = FC_RPORT_ROLE_UNKNOWN;
371 get_device(&rport->dev);
372
373 if (!fc_disc_new_target(disc, rport, &ids)) {
374 disc->event = DISC_EV_SUCCESS;
375 fc_disc_done(disc);
376 }
377 put_device(&rport->dev);
378 } else {
379 fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */
380 }
381
382 mutex_unlock(&disc->disc_mutex); 261 mutex_unlock(&disc->disc_mutex);
383} 262}
384 263
385static struct fc_rport_operations fc_disc_rport_ops = {
386 .event_callback = fc_disc_rport_callback,
387};
388
389/**
390 * fc_disc_new_target() - Handle new target found by discovery
391 * @lport: FC local port
392 * @rport: The previous FC remote port (NULL if new remote port)
393 * @ids: Identifiers for the new FC remote port
394 *
395 * Locking Note: This function expects that the disc_mutex is locked
396 * before it is called.
397 */
398static int fc_disc_new_target(struct fc_disc *disc,
399 struct fc_rport *rport,
400 struct fc_rport_identifiers *ids)
401{
402 struct fc_lport *lport = disc->lport;
403 struct fc_rport_libfc_priv *rdata;
404 int error = 0;
405
406 if (rport && ids->port_name) {
407 if (rport->port_name == -1) {
408 /*
409 * Set WWN and fall through to notify of create.
410 */
411 fc_rport_set_name(rport, ids->port_name,
412 rport->node_name);
413 } else if (rport->port_name != ids->port_name) {
414 /*
415 * This is a new port with the same FCID as
416 * a previously-discovered port. Presumably the old
417 * port logged out and a new port logged in and was
418 * assigned the same FCID. This should be rare.
419 * Delete the old one and fall thru to re-create.
420 */
421 fc_disc_del_target(disc, rport);
422 rport = NULL;
423 }
424 }
425 if (((ids->port_name != -1) || (ids->port_id != -1)) &&
426 ids->port_id != fc_host_port_id(lport->host) &&
427 ids->port_name != lport->wwpn) {
428 if (!rport) {
429 rport = lport->tt.rport_lookup(lport, ids->port_id);
430 if (!rport) {
431 struct fc_disc_port dp;
432 dp.lp = lport;
433 dp.ids.port_id = ids->port_id;
434 dp.ids.port_name = ids->port_name;
435 dp.ids.node_name = ids->node_name;
436 dp.ids.roles = ids->roles;
437 rport = lport->tt.rport_create(&dp);
438 }
439 if (!rport)
440 error = -ENOMEM;
441 }
442 if (rport) {
443 rdata = rport->dd_data;
444 rdata->ops = &fc_disc_rport_ops;
445 rdata->rp_state = RPORT_ST_INIT;
446 list_add_tail(&rdata->peers, &disc->rogue_rports);
447 lport->tt.rport_login(rport);
448 }
449 }
450 return error;
451}
452
453/**
454 * fc_disc_del_target() - Delete a target
455 * @disc: FC discovery context
456 * @rport: The remote port to be removed
457 */
458static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
459{
460 struct fc_lport *lport = disc->lport;
461 struct fc_rport_libfc_priv *rdata = rport->dd_data;
462 list_del(&rdata->peers);
463 lport->tt.rport_logoff(rport);
464}
465
466/** 264/**
467 * fc_disc_done() - Discovery has been completed 265 * fc_disc_done() - Discovery has been completed
468 * @disc: FC discovery context 266 * @disc: FC discovery context
267 * @event: discovery completion status
268 *
469 * Locking Note: This function expects that the disc mutex is locked before 269 * Locking Note: This function expects that the disc mutex is locked before
470 * it is called. The discovery callback is then made with the lock released, 270 * it is called. The discovery callback is then made with the lock released,
471 * and the lock is re-taken before returning from this function 271 * and the lock is re-taken before returning from this function
472 */ 272 */
473static void fc_disc_done(struct fc_disc *disc) 273static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
474{ 274{
475 struct fc_lport *lport = disc->lport; 275 struct fc_lport *lport = disc->lport;
476 enum fc_disc_event event; 276 struct fc_rport_priv *rdata;
477 277
478 FC_DISC_DBG(disc, "Discovery complete\n"); 278 FC_DISC_DBG(disc, "Discovery complete\n");
479 279
480 event = disc->event; 280 disc->pending = 0;
481 disc->event = DISC_EV_NONE; 281 if (disc->requested) {
282 fc_disc_restart(disc);
283 return;
284 }
482 285
483 if (disc->requested) 286 /*
484 fc_disc_gpn_ft_req(disc); 287 * Go through all remote ports. If they were found in the latest
485 else 288 * discovery, reverify or log them in. Otherwise, log them out.
486 disc->pending = 0; 289 * Skip ports which were never discovered. These are the dNS port
290 * and ports which were created by PLOGI.
291 */
292 list_for_each_entry(rdata, &disc->rports, peers) {
293 if (!rdata->disc_id)
294 continue;
295 if (rdata->disc_id == disc->disc_id)
296 lport->tt.rport_login(rdata);
297 else
298 lport->tt.rport_logoff(rdata);
299 }
487 300
488 mutex_unlock(&disc->disc_mutex); 301 mutex_unlock(&disc->disc_mutex);
489 disc->disc_callback(lport, event); 302 disc->disc_callback(lport, event);
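The loop above is effectively a mark-and-sweep over the remote-port list: ports stamped with the current disc_id were seen in the latest GPN_FT pass and get (re)logged in, stale stamps are logged off, and a zero stamp (the dNS port, or ports created by PLOGI) is skipped. A hedged sketch of the same idea with hypothetical stand-in types, not the libfc structures:

#include <stdio.h>

struct rport {
        unsigned int port_id;
        unsigned short disc_id; /* 0 = never discovered (dNS, PLOGI) */
};

static void sweep(const struct rport *ports, int n, unsigned short disc_id)
{
        int i;

        for (i = 0; i < n; i++) {
                if (!ports[i].disc_id)
                        continue;               /* skip dNS/PLOGI ports */
                if (ports[i].disc_id == disc_id)
                        printf("login  %6x\n", ports[i].port_id);
                else
                        printf("logoff %6x\n", ports[i].port_id);
        }
}

int main(void)
{
        const struct rport ports[] = {
                { 0xfffffc, 0 },        /* dNS: never swept */
                { 0x010203, 3 },        /* seen in the latest pass */
                { 0x010204, 1 },        /* stale: logged off */
        };

        sweep(ports, 3, 3);
        return 0;
}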
@@ -522,11 +335,8 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
522 } 335 }
523 disc->retry_count++; 336 disc->retry_count++;
524 schedule_delayed_work(&disc->disc_work, delay); 337 schedule_delayed_work(&disc->disc_work, delay);
525 } else { 338 } else
526 /* exceeded retries */ 339 fc_disc_done(disc, DISC_EV_FAILED);
527 disc->event = DISC_EV_FAILED;
528 fc_disc_done(disc);
529 }
530 } 340 }
531} 341}
532 342
@@ -555,7 +365,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc)
555 if (!fp) 365 if (!fp)
556 goto err; 366 goto err;
557 367
558 if (lport->tt.elsct_send(lport, NULL, fp, 368 if (lport->tt.elsct_send(lport, 0, fp,
559 FC_NS_GPN_FT, 369 FC_NS_GPN_FT,
560 fc_disc_gpn_ft_resp, 370 fc_disc_gpn_ft_resp,
561 disc, lport->e_d_tov)) 371 disc, lport->e_d_tov))
@@ -565,10 +375,12 @@ err:
565} 375}
566 376
567/** 377/**
568 * fc_disc_gpn_ft_parse() - Parse the list of IDs and names resulting from a request 378 * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
569 * @lport: Fibre Channel host port instance 379 * @lport: Fibre Channel host port instance
570 * @buf: GPN_FT response buffer 380 * @buf: GPN_FT response buffer
571 * @len: size of response buffer 381 * @len: size of response buffer
382 *
383 * Goes through the list of IDs and names resulting from a request.
572 */ 384 */
573static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) 385static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
574{ 386{
@@ -578,11 +390,11 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
578 size_t plen; 390 size_t plen;
579 size_t tlen; 391 size_t tlen;
580 int error = 0; 392 int error = 0;
581 struct fc_disc_port dp; 393 struct fc_rport_identifiers ids;
582 struct fc_rport *rport; 394 struct fc_rport_priv *rdata;
583 struct fc_rport_libfc_priv *rdata;
584 395
585 lport = disc->lport; 396 lport = disc->lport;
397 disc->seq_count++;
586 398
587 /* 399 /*
588 * Handle partial name record left over from previous call. 400 * Handle partial name record left over from previous call.
@@ -591,6 +403,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
591 plen = len; 403 plen = len;
592 np = (struct fc_gpn_ft_resp *)bp; 404 np = (struct fc_gpn_ft_resp *)bp;
593 tlen = disc->buf_len; 405 tlen = disc->buf_len;
406 disc->buf_len = 0;
594 if (tlen) { 407 if (tlen) {
595 WARN_ON(tlen >= sizeof(*np)); 408 WARN_ON(tlen >= sizeof(*np));
596 plen = sizeof(*np) - tlen; 409 plen = sizeof(*np) - tlen;
@@ -621,31 +434,25 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
621 * After the first time through the loop, things return to "normal". 434 * After the first time through the loop, things return to "normal".
622 */ 435 */
623 while (plen >= sizeof(*np)) { 436 while (plen >= sizeof(*np)) {
624 dp.lp = lport; 437 ids.port_id = ntoh24(np->fp_fid);
625 dp.ids.port_id = ntoh24(np->fp_fid); 438 ids.port_name = ntohll(np->fp_wwpn);
626 dp.ids.port_name = ntohll(np->fp_wwpn); 439
627 dp.ids.node_name = -1; 440 if (ids.port_id != fc_host_port_id(lport->host) &&
628 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN; 441 ids.port_name != lport->wwpn) {
629 442 rdata = lport->tt.rport_create(lport, ids.port_id);
630 if ((dp.ids.port_id != fc_host_port_id(lport->host)) && 443 if (rdata) {
631 (dp.ids.port_name != lport->wwpn)) { 444 rdata->ids.port_name = ids.port_name;
632 rport = lport->tt.rport_create(&dp); 445 rdata->disc_id = disc->disc_id;
633 if (rport) { 446 } else {
634 rdata = rport->dd_data;
635 rdata->ops = &fc_disc_rport_ops;
636 rdata->local_port = lport;
637 list_add_tail(&rdata->peers,
638 &disc->rogue_rports);
639 lport->tt.rport_login(rport);
640 } else
641 printk(KERN_WARNING "libfc: Failed to allocate " 447 printk(KERN_WARNING "libfc: Failed to allocate "
642 "memory for the newly discovered port " 448 "memory for the newly discovered port "
643 "(%6x)\n", dp.ids.port_id); 449 "(%6x)\n", ids.port_id);
450 error = -ENOMEM;
451 }
644 } 452 }
645 453
646 if (np->fp_flags & FC_NS_FID_LAST) { 454 if (np->fp_flags & FC_NS_FID_LAST) {
647 disc->event = DISC_EV_SUCCESS; 455 fc_disc_done(disc, DISC_EV_SUCCESS);
648 fc_disc_done(disc);
649 len = 0; 456 len = 0;
650 break; 457 break;
651 } 458 }
@@ -665,8 +472,6 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
665 memcpy(&disc->partial_buf, np, len); 472 memcpy(&disc->partial_buf, np, len);
666 } 473 }
667 disc->buf_len = (unsigned char) len; 474 disc->buf_len = (unsigned char) len;
668 } else {
669 disc->buf_len = 0;
670 } 475 }
671 return error; 476 return error;
672} 477}
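fc_disc_gpn_ft_parse() above has to cope with name-server records that straddle frame boundaries: a partial record tail is stashed in disc->partial_buf/disc->buf_len and completed on the next call. A minimal sketch of that carry-over, assuming a hypothetical fixed-size record type rather than struct fc_gpn_ft_resp:

#include <stdio.h>
#include <string.h>

struct rec { unsigned char b[8]; };    /* fixed-size name-server record */

static struct rec partial;             /* disc->partial_buf analogue */
static size_t buf_len;                 /* bytes stashed from the last call */

static void parse(const unsigned char *buf, size_t len)
{
        size_t tlen = buf_len;

        buf_len = 0;
        if (tlen) {                     /* finish the stashed record first */
                size_t need = sizeof(partial) - tlen;

                memcpy(partial.b + tlen, buf, need);
                printf("completed split record\n");
                buf += need;
                len -= need;
        }
        while (len >= sizeof(struct rec)) {
                printf("whole record\n");
                buf += sizeof(struct rec);
                len -= sizeof(struct rec);
        }
        if (len) {                      /* stash the new tail */
                memcpy(partial.b, buf, len);
                buf_len = len;
        }
}

int main(void)
{
        unsigned char frame1[12] = { 0 }, frame2[12] = { 0 };

        parse(frame1, sizeof(frame1));  /* one whole record + 4-byte tail */
        parse(frame2, sizeof(frame2));  /* completes the tail + one whole */
        return 0;
}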
@@ -683,8 +488,7 @@ static void fc_disc_timeout(struct work_struct *work)
683 struct fc_disc, 488 struct fc_disc,
684 disc_work.work); 489 disc_work.work);
685 mutex_lock(&disc->disc_mutex); 490 mutex_lock(&disc->disc_mutex);
686 if (disc->requested && !disc->pending) 491 fc_disc_gpn_ft_req(disc);
687 fc_disc_gpn_ft_req(disc);
688 mutex_unlock(&disc->disc_mutex); 492 mutex_unlock(&disc->disc_mutex);
689} 493}
690 494
@@ -703,10 +507,10 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
703 struct fc_disc *disc = disc_arg; 507 struct fc_disc *disc = disc_arg;
704 struct fc_ct_hdr *cp; 508 struct fc_ct_hdr *cp;
705 struct fc_frame_header *fh; 509 struct fc_frame_header *fh;
510 enum fc_disc_event event = DISC_EV_NONE;
706 unsigned int seq_cnt; 511 unsigned int seq_cnt;
707 void *buf = NULL;
708 unsigned int len; 512 unsigned int len;
709 int error; 513 int error = 0;
710 514
711 mutex_lock(&disc->disc_mutex); 515 mutex_lock(&disc->disc_mutex);
712 FC_DISC_DBG(disc, "Received a GPN_FT response\n"); 516 FC_DISC_DBG(disc, "Received a GPN_FT response\n");
@@ -721,77 +525,158 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
721 fh = fc_frame_header_get(fp); 525 fh = fc_frame_header_get(fp);
722 len = fr_len(fp) - sizeof(*fh); 526 len = fr_len(fp) - sizeof(*fh);
723 seq_cnt = ntohs(fh->fh_seq_cnt); 527 seq_cnt = ntohs(fh->fh_seq_cnt);
724 if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && 528 if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) {
725 disc->seq_count == 0) {
726 cp = fc_frame_payload_get(fp, sizeof(*cp)); 529 cp = fc_frame_payload_get(fp, sizeof(*cp));
727 if (!cp) { 530 if (!cp) {
728 FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n", 531 FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
729 fr_len(fp)); 532 fr_len(fp));
533 event = DISC_EV_FAILED;
730 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { 534 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
731 535
732 /* Accepted, parse the response. */ 536 /* Accepted, parse the response. */
733 buf = cp + 1;
734 len -= sizeof(*cp); 537 len -= sizeof(*cp);
538 error = fc_disc_gpn_ft_parse(disc, cp + 1, len);
735 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { 539 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
736 FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x " 540 FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
737 "(check zoning)\n", cp->ct_reason, 541 "(check zoning)\n", cp->ct_reason,
738 cp->ct_explan); 542 cp->ct_explan);
739 disc->event = DISC_EV_FAILED; 543 event = DISC_EV_FAILED;
740 fc_disc_done(disc); 544 if (cp->ct_reason == FC_FS_RJT_UNABL &&
545 cp->ct_explan == FC_FS_EXP_FTNR)
546 event = DISC_EV_SUCCESS;
741 } else { 547 } else {
742 FC_DISC_DBG(disc, "GPN_FT unexpected response code " 548 FC_DISC_DBG(disc, "GPN_FT unexpected response code "
743 "%x\n", ntohs(cp->ct_cmd)); 549 "%x\n", ntohs(cp->ct_cmd));
550 event = DISC_EV_FAILED;
744 } 551 }
745 } else if (fr_sof(fp) == FC_SOF_N3 && 552 } else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) {
746 seq_cnt == disc->seq_count) { 553 error = fc_disc_gpn_ft_parse(disc, fh + 1, len);
747 buf = fh + 1;
748 } else { 554 } else {
749 FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? " 555 FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
750 "seq_cnt %x expected %x sof %x eof %x\n", 556 "seq_cnt %x expected %x sof %x eof %x\n",
751 seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp)); 557 seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
558 event = DISC_EV_FAILED;
752 } 559 }
753 if (buf) { 560 if (error)
754 error = fc_disc_gpn_ft_parse(disc, buf, len); 561 fc_disc_error(disc, fp);
755 if (error) 562 else if (event != DISC_EV_NONE)
756 fc_disc_error(disc, fp); 563 fc_disc_done(disc, event);
757 else
758 disc->seq_count++;
759 }
760 fc_frame_free(fp); 564 fc_frame_free(fp);
761
762 mutex_unlock(&disc->disc_mutex); 565 mutex_unlock(&disc->disc_mutex);
763} 566}
764 567
765/** 568/**
766 * fc_disc_single() - Discover the directory information for a single target 569 * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID)
767 * @lport: FC local port 570 * @sp: exchange sequence
768 * @dp: The port to rediscover 571 * @fp: response frame
572 * @rdata_arg: remote port private data
769 * 573 *
770 * Locking Note: This function expects that the disc_mutex is locked 574 * Locking Note: This function is called without disc mutex held.
771 * before it is called.
772 */ 575 */
773static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) 576static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
577 void *rdata_arg)
774{ 578{
579 struct fc_rport_priv *rdata = rdata_arg;
580 struct fc_rport_priv *new_rdata;
775 struct fc_lport *lport; 581 struct fc_lport *lport;
776 struct fc_rport *new_rport; 582 struct fc_disc *disc;
777 struct fc_rport_libfc_priv *rdata; 583 struct fc_ct_hdr *cp;
584 struct fc_ns_gid_pn *pn;
585 u64 port_name;
778 586
779 lport = disc->lport; 587 lport = rdata->local_port;
588 disc = &lport->disc;
780 589
781 if (dp->ids.port_id == fc_host_port_id(lport->host)) 590 mutex_lock(&disc->disc_mutex);
591 if (PTR_ERR(fp) == -FC_EX_CLOSED)
782 goto out; 592 goto out;
783 593 if (IS_ERR(fp))
784 new_rport = lport->tt.rport_create(dp); 594 goto redisc;
785 if (new_rport) { 595
786 rdata = new_rport->dd_data; 596 cp = fc_frame_payload_get(fp, sizeof(*cp));
787 rdata->ops = &fc_disc_rport_ops; 597 if (!cp)
788 kfree(dp); 598 goto redisc;
789 list_add_tail(&rdata->peers, &disc->rogue_rports); 599 if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
790 lport->tt.rport_login(new_rport); 600 if (fr_len(fp) < sizeof(struct fc_frame_header) +
601 sizeof(*cp) + sizeof(*pn))
602 goto redisc;
603 pn = (struct fc_ns_gid_pn *)(cp + 1);
604 port_name = get_unaligned_be64(&pn->fn_wwpn);
605 if (rdata->ids.port_name == -1)
606 rdata->ids.port_name = port_name;
607 else if (rdata->ids.port_name != port_name) {
608 FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
609 "Port-id %x wwpn %llx\n",
610 rdata->ids.port_id, port_name);
611 lport->tt.rport_logoff(rdata);
612
613 new_rdata = lport->tt.rport_create(lport,
614 rdata->ids.port_id);
615 if (new_rdata) {
616 new_rdata->disc_id = disc->disc_id;
617 lport->tt.rport_login(new_rdata);
618 }
619 goto out;
620 }
621 rdata->disc_id = disc->disc_id;
622 lport->tt.rport_login(rdata);
623 } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
624 FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
625 cp->ct_reason, cp->ct_explan);
626 lport->tt.rport_logoff(rdata);
627 } else {
628 FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
629 ntohs(cp->ct_cmd));
630redisc:
631 fc_disc_restart(disc);
791 } 632 }
792 return;
793out: 633out:
794 kfree(dp); 634 mutex_unlock(&disc->disc_mutex);
635 kref_put(&rdata->kref, lport->tt.rport_destroy);
636}
637
638/**
639 * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request
640 * @lport: local port
641 * @rdata: remote port private data
642 *
643 * Locking Note: This function expects that the disc_mutex is locked
644 * before it is called.
645 * On failure, an error code is returned.
646 */
647static int fc_disc_gpn_id_req(struct fc_lport *lport,
648 struct fc_rport_priv *rdata)
649{
650 struct fc_frame *fp;
651
652 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
653 sizeof(struct fc_ns_fid));
654 if (!fp)
655 return -ENOMEM;
656 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID,
657 fc_disc_gpn_id_resp, rdata, lport->e_d_tov))
658 return -ENOMEM;
659 kref_get(&rdata->kref);
660 return 0;
661}
662
663/**
664 * fc_disc_single() - Discover the directory information for a single target
665 * @lport: local port
666 * @dp: The port to rediscover
667 *
668 * Locking Note: This function expects that the disc_mutex is locked
669 * before it is called.
670 */
671static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
672{
673 struct fc_rport_priv *rdata;
674
675 rdata = lport->tt.rport_create(lport, dp->port_id);
676 if (!rdata)
677 return -ENOMEM;
678 rdata->disc_id = 0;
679 return fc_disc_gpn_id_req(lport, rdata);
795} 680}
796 681
797/** 682/**
@@ -841,18 +726,12 @@ int fc_disc_init(struct fc_lport *lport)
841 if (!lport->tt.disc_recv_req) 726 if (!lport->tt.disc_recv_req)
842 lport->tt.disc_recv_req = fc_disc_recv_req; 727 lport->tt.disc_recv_req = fc_disc_recv_req;
843 728
844 if (!lport->tt.rport_lookup)
845 lport->tt.rport_lookup = fc_disc_lookup_rport;
846
847 disc = &lport->disc; 729 disc = &lport->disc;
848 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); 730 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
849 mutex_init(&disc->disc_mutex); 731 mutex_init(&disc->disc_mutex);
850 INIT_LIST_HEAD(&disc->rports); 732 INIT_LIST_HEAD(&disc->rports);
851 INIT_LIST_HEAD(&disc->rogue_rports);
852 733
853 disc->lport = lport; 734 disc->lport = lport;
854 disc->delay = FC_DISC_DELAY;
855 disc->event = DISC_EV_NONE;
856 735
857 return 0; 736 return 0;
858} 737}
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 5878b34bff18..5cfa68732e9d 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -32,7 +32,7 @@
32 * fc_elsct_send - sends ELS/CT frame 32 * fc_elsct_send - sends ELS/CT frame
33 */ 33 */
34static struct fc_seq *fc_elsct_send(struct fc_lport *lport, 34static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
35 struct fc_rport *rport, 35 u32 did,
36 struct fc_frame *fp, 36 struct fc_frame *fp,
37 unsigned int op, 37 unsigned int op,
38 void (*resp)(struct fc_seq *, 38 void (*resp)(struct fc_seq *,
@@ -41,16 +41,17 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
41 void *arg, u32 timer_msec) 41 void *arg, u32 timer_msec)
42{ 42{
43 enum fc_rctl r_ctl; 43 enum fc_rctl r_ctl;
44 u32 did = FC_FID_NONE;
45 enum fc_fh_type fh_type; 44 enum fc_fh_type fh_type;
46 int rc; 45 int rc;
47 46
48 /* ELS requests */ 47 /* ELS requests */
49 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) 48 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
50 rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type); 49 rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
51 else 50 else {
52 /* CT requests */ 51 /* CT requests */
53 rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type); 52 rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type);
53 did = FC_FID_DIR_SERV;
54 }
54 55
55 if (rc) 56 if (rc)
56 return NULL; 57 return NULL;
@@ -69,3 +70,41 @@ int fc_elsct_init(struct fc_lport *lport)
69 return 0; 70 return 0;
70} 71}
71EXPORT_SYMBOL(fc_elsct_init); 72EXPORT_SYMBOL(fc_elsct_init);
73
74/**
75 * fc_els_resp_type() - return string describing ELS response for debug.
76 * @fp: frame pointer with possible error code.
77 */
78const char *fc_els_resp_type(struct fc_frame *fp)
79{
80 const char *msg;
81 if (IS_ERR(fp)) {
82 switch (-PTR_ERR(fp)) {
83 case FC_NO_ERR:
84 msg = "response no error";
85 break;
86 case FC_EX_TIMEOUT:
87 msg = "response timeout";
88 break;
89 case FC_EX_CLOSED:
90 msg = "response closed";
91 break;
92 default:
93 msg = "response unknown error";
94 break;
95 }
96 } else {
97 switch (fc_frame_payload_op(fp)) {
98 case ELS_LS_ACC:
99 msg = "accept";
100 break;
101 case ELS_LS_RJT:
102 msg = "reject";
103 break;
104 default:
105 msg = "response unknown ELS";
106 break;
107 }
108 }
109 return msg;
110}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 145ab9ba55ea..c1c15748220c 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -32,6 +32,9 @@
32#include <scsi/libfc.h> 32#include <scsi/libfc.h>
33#include <scsi/fc_encode.h> 33#include <scsi/fc_encode.h>
34 34
35u16 fc_cpu_mask; /* cpu mask for possible cpus */
36EXPORT_SYMBOL(fc_cpu_mask);
37static u16 fc_cpu_order; /* 2's power to represent total possible cpus */
35static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ 38static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
36 39
37/* 40/*
@@ -48,6 +51,20 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
48 */ 51 */
49 52
50/* 53/*
54 * Per cpu exchange pool
55 *
 56 * This structure manages per-cpu exchanges in an array of exchange
 57 * pointers. The pointer array is laid out immediately after the
 58 * struct fc_exch_pool, covering the pool's assigned exchange range.
59 */
60struct fc_exch_pool {
61 u16 next_index; /* next possible free exchange index */
62 u16 total_exches; /* total allocated exchanges */
63 spinlock_t lock; /* exch pool lock */
64 struct list_head ex_list; /* allocated exchanges list */
65};
66
67/*
51 * Exchange manager. 68 * Exchange manager.
52 * 69 *
53 * This structure is the center for creating exchanges and sequences. 70 * This structure is the center for creating exchanges and sequences.
@@ -55,17 +72,13 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
55 */ 72 */
56struct fc_exch_mgr { 73struct fc_exch_mgr {
57 enum fc_class class; /* default class for sequences */ 74 enum fc_class class; /* default class for sequences */
58 spinlock_t em_lock; /* exchange manager lock, 75 struct kref kref; /* exchange mgr reference count */
59 must be taken before ex_lock */
60 u16 last_xid; /* last allocated exchange ID */
61 u16 min_xid; /* min exchange ID */ 76 u16 min_xid; /* min exchange ID */
62 u16 max_xid; /* max exchange ID */ 77 u16 max_xid; /* max exchange ID */
63 u16 max_read; /* max exchange ID for read */
64 u16 last_read; /* last xid allocated for read */
65 u32 total_exches; /* total allocated exchanges */
66 struct list_head ex_list; /* allocated exchanges list */ 78 struct list_head ex_list; /* allocated exchanges list */
67 struct fc_lport *lp; /* fc device instance */
68 mempool_t *ep_pool; /* reserve ep's */ 79 mempool_t *ep_pool; /* reserve ep's */
80 u16 pool_max_index; /* max exch array index in exch pool */
81 struct fc_exch_pool *pool; /* per cpu exch pool */
69 82
70 /* 83 /*
71 * currently exchange mgr stats are updated but not used. 84 * currently exchange mgr stats are updated but not used.
@@ -80,10 +93,15 @@ struct fc_exch_mgr {
80 atomic_t seq_not_found; 93 atomic_t seq_not_found;
81 atomic_t non_bls_resp; 94 atomic_t non_bls_resp;
82 } stats; 95 } stats;
83 struct fc_exch **exches; /* for exch pointers indexed by xid */
84}; 96};
85#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) 97#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
86 98
99struct fc_exch_mgr_anchor {
100 struct list_head ema_list;
101 struct fc_exch_mgr *mp;
102 bool (*match)(struct fc_frame *);
103};
104
87static void fc_exch_rrq(struct fc_exch *); 105static void fc_exch_rrq(struct fc_exch *);
88static void fc_seq_ls_acc(struct fc_seq *); 106static void fc_seq_ls_acc(struct fc_seq *);
89static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason, 107static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
@@ -167,8 +185,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
167 * sequence allocation and deallocation must be locked. 185 * sequence allocation and deallocation must be locked.
 168 * - exchange refcnt can be done atomically without locks. 186
169 * - sequence allocation must be locked by exch lock. 187 * - sequence allocation must be locked by exch lock.
170 * - If the em_lock and ex_lock must be taken at the same time, then the 188 * - If the EM pool lock and ex_lock must be taken at the same time, then the
171 * em_lock must be taken before the ex_lock. 189 * EM pool lock must be taken before the ex_lock.
172 */ 190 */
173 191
174/* 192/*
@@ -268,8 +286,6 @@ static void fc_exch_release(struct fc_exch *ep)
268 mp = ep->em; 286 mp = ep->em;
269 if (ep->destructor) 287 if (ep->destructor)
270 ep->destructor(&ep->seq, ep->arg); 288 ep->destructor(&ep->seq, ep->arg);
271 if (ep->lp->tt.exch_put)
272 ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
273 WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE)); 289 WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
274 mempool_free(ep, mp->ep_pool); 290 mempool_free(ep, mp->ep_pool);
275 } 291 }
@@ -299,17 +315,31 @@ static int fc_exch_done_locked(struct fc_exch *ep)
299 return rc; 315 return rc;
300} 316}
301 317
302static void fc_exch_mgr_delete_ep(struct fc_exch *ep) 318static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
319 u16 index)
303{ 320{
304 struct fc_exch_mgr *mp; 321 struct fc_exch **exches = (struct fc_exch **)(pool + 1);
322 return exches[index];
323}
305 324
306 mp = ep->em; 325static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
307 spin_lock_bh(&mp->em_lock); 326 struct fc_exch *ep)
308 WARN_ON(mp->total_exches <= 0); 327{
309 mp->total_exches--; 328 ((struct fc_exch **)(pool + 1))[index] = ep;
310 mp->exches[ep->xid - mp->min_xid] = NULL; 329}
330
331static void fc_exch_delete(struct fc_exch *ep)
332{
333 struct fc_exch_pool *pool;
334
335 pool = ep->pool;
336 spin_lock_bh(&pool->lock);
337 WARN_ON(pool->total_exches <= 0);
338 pool->total_exches--;
339 fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order,
340 NULL);
311 list_del(&ep->ex_list); 341 list_del(&ep->ex_list);
312 spin_unlock_bh(&mp->em_lock); 342 spin_unlock_bh(&pool->lock);
313 fc_exch_release(ep); /* drop hold for exch in mp */ 343 fc_exch_release(ep); /* drop hold for exch in mp */
314} 344}
315 345
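fc_exch_ptr_get()/fc_exch_ptr_set() above rely on the pool layout described earlier: the exchange pointer array sits in memory immediately after struct fc_exch_pool, so (pool + 1) is the first array slot. A small sketch of that layout with hypothetical stand-in types (the real pool is per-cpu memory sized in fc_exch_mgr_alloc()):

#include <stdio.h>
#include <stdlib.h>

struct pool {
        unsigned short next_index;      /* next possible free slot */
        unsigned short total;           /* allocated exchanges */
        void *lock;     /* stand-in; keeps (pool + 1) pointer-aligned */
};

int main(void)
{
        size_t range = 8;       /* exchange pointers per pool */
        struct pool *pool;
        void **slots;

        pool = calloc(1, sizeof(*pool) + range * sizeof(void *));
        if (!pool)
                return 1;
        slots = (void **)(pool + 1);    /* array begins right after struct */

        slots[3] = pool;        /* analogous to fc_exch_ptr_set(pool, 3, ep) */
        printf("slot 3 = %p\n", slots[3]);
        free(pool);
        return 0;
}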
@@ -322,7 +352,7 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
322 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) 352 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
323 return; 353 return;
324 354
325 FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n"); 355 FC_EXCH_DBG(ep, "Exchange timer armed\n");
326 356
327 if (schedule_delayed_work(&ep->timeout_work, 357 if (schedule_delayed_work(&ep->timeout_work,
328 msecs_to_jiffies(timer_msec))) 358 msecs_to_jiffies(timer_msec)))
@@ -408,6 +438,8 @@ static void fc_exch_timeout(struct work_struct *work)
408 u32 e_stat; 438 u32 e_stat;
409 int rc = 1; 439 int rc = 1;
410 440
441 FC_EXCH_DBG(ep, "Exchange timed out\n");
442
411 spin_lock_bh(&ep->ex_lock); 443 spin_lock_bh(&ep->ex_lock);
412 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) 444 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
413 goto unlock; 445 goto unlock;
@@ -427,7 +459,7 @@ static void fc_exch_timeout(struct work_struct *work)
427 rc = fc_exch_done_locked(ep); 459 rc = fc_exch_done_locked(ep);
428 spin_unlock_bh(&ep->ex_lock); 460 spin_unlock_bh(&ep->ex_lock);
429 if (!rc) 461 if (!rc)
430 fc_exch_mgr_delete_ep(ep); 462 fc_exch_delete(ep);
431 if (resp) 463 if (resp)
432 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); 464 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
433 fc_seq_exch_abort(sp, 2 * ep->r_a_tov); 465 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
@@ -460,65 +492,20 @@ static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
460 return sp; 492 return sp;
461} 493}
462 494
463/* 495/**
464 * fc_em_alloc_xid - returns an xid based on request type 496 * fc_exch_em_alloc() - allocate an exchange from a specified EM.
465 * @lp : ptr to associated lport 497 * @lport: ptr to the local port
466 * @fp : ptr to the assocated frame 498 * @mp: ptr to the exchange manager
467 * 499 *
468 * check the associated fc_fsp_pkt to get scsi command type and 500 * Returns pointer to allocated fc_exch with exch lock held.
469 * command direction to decide from which range this exch id
470 * will be allocated from.
471 *
472 * Returns : 0 or an valid xid
473 */ 501 */
474static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp) 502static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
475{ 503 struct fc_exch_mgr *mp)
476 u16 xid, min, max;
477 u16 *plast;
478 struct fc_exch *ep = NULL;
479
480 if (mp->max_read) {
481 if (fc_fcp_is_read(fr_fsp(fp))) {
482 min = mp->min_xid;
483 max = mp->max_read;
484 plast = &mp->last_read;
485 } else {
486 min = mp->max_read + 1;
487 max = mp->max_xid;
488 plast = &mp->last_xid;
489 }
490 } else {
491 min = mp->min_xid;
492 max = mp->max_xid;
493 plast = &mp->last_xid;
494 }
495 xid = *plast;
496 do {
497 xid = (xid == max) ? min : xid + 1;
498 ep = mp->exches[xid - mp->min_xid];
499 } while ((ep != NULL) && (xid != *plast));
500
501 if (unlikely(ep))
502 xid = 0;
503 else
504 *plast = xid;
505
506 return xid;
507}
508
509/*
510 * fc_exch_alloc - allocate an exchange.
511 * @mp : ptr to the exchange manager
512 * @xid: input xid
513 *
514 * if xid is supplied zero then assign next free exchange ID
515 * from exchange manager, otherwise use supplied xid.
516 * Returns with exch lock held.
517 */
518struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
519 struct fc_frame *fp, u16 xid)
520{ 504{
521 struct fc_exch *ep; 505 struct fc_exch *ep;
506 unsigned int cpu;
507 u16 index;
508 struct fc_exch_pool *pool;
522 509
523 /* allocate memory for exchange */ 510 /* allocate memory for exchange */
524 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); 511 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
@@ -528,16 +515,17 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
528 } 515 }
529 memset(ep, 0, sizeof(*ep)); 516 memset(ep, 0, sizeof(*ep));
530 517
531 spin_lock_bh(&mp->em_lock); 518 cpu = smp_processor_id();
532 /* alloc xid if input xid 0 */ 519 pool = per_cpu_ptr(mp->pool, cpu);
533 if (!xid) { 520 spin_lock_bh(&pool->lock);
534 /* alloc a new xid */ 521 index = pool->next_index;
535 xid = fc_em_alloc_xid(mp, fp); 522 /* allocate new exch from pool */
536 if (!xid) { 523 while (fc_exch_ptr_get(pool, index)) {
537 printk(KERN_WARNING "libfc: Failed to allocate an exhange\n"); 524 index = index == mp->pool_max_index ? 0 : index + 1;
525 if (index == pool->next_index)
538 goto err; 526 goto err;
539 }
540 } 527 }
528 pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
541 529
542 fc_exch_hold(ep); /* hold for exch in mp */ 530 fc_exch_hold(ep); /* hold for exch in mp */
543 spin_lock_init(&ep->ex_lock); 531 spin_lock_init(&ep->ex_lock);
@@ -548,18 +536,19 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
548 */ 536 */
549 spin_lock_bh(&ep->ex_lock); 537 spin_lock_bh(&ep->ex_lock);
550 538
551 mp->exches[xid - mp->min_xid] = ep; 539 fc_exch_ptr_set(pool, index, ep);
552 list_add_tail(&ep->ex_list, &mp->ex_list); 540 list_add_tail(&ep->ex_list, &pool->ex_list);
553 fc_seq_alloc(ep, ep->seq_id++); 541 fc_seq_alloc(ep, ep->seq_id++);
554 mp->total_exches++; 542 pool->total_exches++;
555 spin_unlock_bh(&mp->em_lock); 543 spin_unlock_bh(&pool->lock);
556 544
557 /* 545 /*
558 * update exchange 546 * update exchange
559 */ 547 */
560 ep->oxid = ep->xid = xid; 548 ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
561 ep->em = mp; 549 ep->em = mp;
562 ep->lp = mp->lp; 550 ep->pool = pool;
551 ep->lp = lport;
563 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ 552 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
564 ep->rxid = FC_XID_UNKNOWN; 553 ep->rxid = FC_XID_UNKNOWN;
565 ep->class = mp->class; 554 ep->class = mp->class;
@@ -567,11 +556,36 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
567out: 556out:
568 return ep; 557 return ep;
569err: 558err:
570 spin_unlock_bh(&mp->em_lock); 559 spin_unlock_bh(&pool->lock);
571 atomic_inc(&mp->stats.no_free_exch_xid); 560 atomic_inc(&mp->stats.no_free_exch_xid);
572 mempool_free(ep, mp->ep_pool); 561 mempool_free(ep, mp->ep_pool);
573 return NULL; 562 return NULL;
574} 563}
564
565/**
566 * fc_exch_alloc() - allocate an exchange.
567 * @lport: ptr to the local port
568 * @fp: ptr to the FC frame
569 *
 570 * This function walks the list of exchange manager (EM)
 571 * anchors to select an EM for the new exchange allocation.
 572 * The first EM whose match function is NULL, or whose match
 573 * function returns true for the frame, is selected.
574 */
575struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp)
576{
577 struct fc_exch_mgr_anchor *ema;
578 struct fc_exch *ep;
579
580 list_for_each_entry(ema, &lport->ema_list, ema_list) {
581 if (!ema->match || ema->match(fp)) {
582 ep = fc_exch_em_alloc(lport, ema->mp);
583 if (ep)
584 return ep;
585 }
586 }
587 return NULL;
588}
575EXPORT_SYMBOL(fc_exch_alloc); 589EXPORT_SYMBOL(fc_exch_alloc);
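A hedged sketch of the anchor walk in fc_exch_alloc() above: the first EM anchor whose match callback is NULL, or returns true for the frame, allocates the exchange. The types and the offload criterion here are hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct frame { unsigned int len; };

struct anchor {
        const char *name;
        bool (*match)(const struct frame *);
};

static bool lro_match(const struct frame *fp)
{
        return fp->len >= 4096; /* assumed offload criterion */
}

static const struct anchor *pick(const struct anchor *a, int n,
                                 const struct frame *fp)
{
        int i;

        for (i = 0; i < n; i++)
                if (!a[i].match || a[i].match(fp))
                        return &a[i];
        return NULL;
}

int main(void)
{
        const struct anchor anchors[] = {
                { "offload EM", lro_match },
                { "default EM", NULL },         /* matches everything */
        };
        struct frame small = { 64 };

        printf("%s\n", pick(anchors, 2, &small)->name); /* default EM */
        return 0;
}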
576 590
577/* 591/*
@@ -579,16 +593,18 @@ EXPORT_SYMBOL(fc_exch_alloc);
579 */ 593 */
580static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) 594static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
581{ 595{
596 struct fc_exch_pool *pool;
582 struct fc_exch *ep = NULL; 597 struct fc_exch *ep = NULL;
583 598
584 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) { 599 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
585 spin_lock_bh(&mp->em_lock); 600 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
586 ep = mp->exches[xid - mp->min_xid]; 601 spin_lock_bh(&pool->lock);
602 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
587 if (ep) { 603 if (ep) {
588 fc_exch_hold(ep); 604 fc_exch_hold(ep);
589 WARN_ON(ep->xid != xid); 605 WARN_ON(ep->xid != xid);
590 } 606 }
591 spin_unlock_bh(&mp->em_lock); 607 spin_unlock_bh(&pool->lock);
592 } 608 }
593 return ep; 609 return ep;
594} 610}
@@ -602,7 +618,7 @@ void fc_exch_done(struct fc_seq *sp)
602 rc = fc_exch_done_locked(ep); 618 rc = fc_exch_done_locked(ep);
603 spin_unlock_bh(&ep->ex_lock); 619 spin_unlock_bh(&ep->ex_lock);
604 if (!rc) 620 if (!rc)
605 fc_exch_mgr_delete_ep(ep); 621 fc_exch_delete(ep);
606} 622}
607EXPORT_SYMBOL(fc_exch_done); 623EXPORT_SYMBOL(fc_exch_done);
608 624
@@ -610,12 +626,14 @@ EXPORT_SYMBOL(fc_exch_done);
610 * Allocate a new exchange as responder. 626 * Allocate a new exchange as responder.
611 * Sets the responder ID in the frame header. 627 * Sets the responder ID in the frame header.
612 */ 628 */
613static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) 629static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
630 struct fc_exch_mgr *mp,
631 struct fc_frame *fp)
614{ 632{
615 struct fc_exch *ep; 633 struct fc_exch *ep;
616 struct fc_frame_header *fh; 634 struct fc_frame_header *fh;
617 635
618 ep = mp->lp->tt.exch_get(mp->lp, fp); 636 ep = fc_exch_alloc(lport, fp);
619 if (ep) { 637 if (ep) {
620 ep->class = fc_frame_class(fp); 638 ep->class = fc_frame_class(fp);
621 639
@@ -641,7 +659,7 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
641 ep->esb_stat &= ~ESB_ST_SEQ_INIT; 659 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
642 660
643 fc_exch_hold(ep); /* hold for caller */ 661 fc_exch_hold(ep); /* hold for caller */
644 spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */ 662 spin_unlock_bh(&ep->ex_lock); /* lock from fc_exch_alloc */
645 } 663 }
646 return ep; 664 return ep;
647} 665}
@@ -651,7 +669,8 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
651 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold 669 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
652 * on the ep that should be released by the caller. 670 * on the ep that should be released by the caller.
653 */ 671 */
654static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp, 672static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
673 struct fc_exch_mgr *mp,
655 struct fc_frame *fp) 674 struct fc_frame *fp)
656{ 675{
657 struct fc_frame_header *fh = fc_frame_header_get(fp); 676 struct fc_frame_header *fh = fc_frame_header_get(fp);
@@ -705,7 +724,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp,
705 reject = FC_RJT_RX_ID; 724 reject = FC_RJT_RX_ID;
706 goto rel; 725 goto rel;
707 } 726 }
708 ep = fc_exch_resp(mp, fp); 727 ep = fc_exch_resp(lport, mp, fp);
709 if (!ep) { 728 if (!ep) {
710 reject = FC_RJT_EXCH_EST; /* XXX */ 729 reject = FC_RJT_EXCH_EST; /* XXX */
711 goto out; 730 goto out;
@@ -822,7 +841,6 @@ struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
822 struct fc_exch *ep = fc_seq_exch(sp); 841 struct fc_exch *ep = fc_seq_exch(sp);
823 842
824 spin_lock_bh(&ep->ex_lock); 843 spin_lock_bh(&ep->ex_lock);
825 WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
826 sp = fc_seq_start_next_locked(sp); 844 sp = fc_seq_start_next_locked(sp);
827 spin_unlock_bh(&ep->ex_lock); 845 spin_unlock_bh(&ep->ex_lock);
828 846
@@ -999,8 +1017,8 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
999 */ 1017 */
1000 memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3); 1018 memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1001 memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3); 1019 memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1002 fh->fh_ox_id = rx_fh->fh_rx_id; 1020 fh->fh_ox_id = rx_fh->fh_ox_id;
1003 fh->fh_rx_id = rx_fh->fh_ox_id; 1021 fh->fh_rx_id = rx_fh->fh_rx_id;
1004 fh->fh_seq_cnt = rx_fh->fh_seq_cnt; 1022 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1005 fh->fh_r_ctl = FC_RCTL_BA_RJT; 1023 fh->fh_r_ctl = FC_RCTL_BA_RJT;
1006 fh->fh_type = FC_TYPE_BLS; 1024 fh->fh_type = FC_TYPE_BLS;
@@ -1097,7 +1115,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1097 enum fc_pf_rjt_reason reject; 1115 enum fc_pf_rjt_reason reject;
1098 1116
1099 fr_seq(fp) = NULL; 1117 fr_seq(fp) = NULL;
1100 reject = fc_seq_lookup_recip(mp, fp); 1118 reject = fc_seq_lookup_recip(lp, mp, fp);
1101 if (reject == FC_RJT_NONE) { 1119 if (reject == FC_RJT_NONE) {
1102 sp = fr_seq(fp); /* sequence will be held */ 1120 sp = fr_seq(fp); /* sequence will be held */
1103 ep = fc_seq_exch(sp); 1121 ep = fc_seq_exch(sp);
@@ -1123,7 +1141,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1123 lp->tt.lport_recv(lp, sp, fp); 1141 lp->tt.lport_recv(lp, sp, fp);
1124 fc_exch_release(ep); /* release from lookup */ 1142 fc_exch_release(ep); /* release from lookup */
1125 } else { 1143 } else {
1126 FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject); 1144 FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject);
1127 fc_frame_free(fp); 1145 fc_frame_free(fp);
1128 } 1146 }
1129} 1147}
@@ -1193,7 +1211,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1193 WARN_ON(fc_seq_exch(sp) != ep); 1211 WARN_ON(fc_seq_exch(sp) != ep);
1194 spin_unlock_bh(&ep->ex_lock); 1212 spin_unlock_bh(&ep->ex_lock);
1195 if (!rc) 1213 if (!rc)
1196 fc_exch_mgr_delete_ep(ep); 1214 fc_exch_delete(ep);
1197 } 1215 }
1198 1216
1199 /* 1217 /*
@@ -1229,13 +1247,12 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1229 struct fc_seq *sp; 1247 struct fc_seq *sp;
1230 1248
1231 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ 1249 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1232 if (!sp) { 1250
1251 if (!sp)
1233 atomic_inc(&mp->stats.xid_not_found); 1252 atomic_inc(&mp->stats.xid_not_found);
1234 FC_EM_DBG(mp, "seq lookup failed\n"); 1253 else
1235 } else {
1236 atomic_inc(&mp->stats.non_bls_resp); 1254 atomic_inc(&mp->stats.non_bls_resp);
1237 FC_EM_DBG(mp, "non-BLS response to sequence"); 1255
1238 }
1239 fc_frame_free(fp); 1256 fc_frame_free(fp);
1240} 1257}
1241 1258
@@ -1304,7 +1321,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1304 rc = fc_exch_done_locked(ep); 1321 rc = fc_exch_done_locked(ep);
1305 spin_unlock_bh(&ep->ex_lock); 1322 spin_unlock_bh(&ep->ex_lock);
1306 if (!rc) 1323 if (!rc)
1307 fc_exch_mgr_delete_ep(ep); 1324 fc_exch_delete(ep);
1308 1325
1309 if (resp) 1326 if (resp)
1310 resp(sp, fp, ex_resp_arg); 1327 resp(sp, fp, ex_resp_arg);
@@ -1447,44 +1464,77 @@ static void fc_exch_reset(struct fc_exch *ep)
1447 rc = fc_exch_done_locked(ep); 1464 rc = fc_exch_done_locked(ep);
1448 spin_unlock_bh(&ep->ex_lock); 1465 spin_unlock_bh(&ep->ex_lock);
1449 if (!rc) 1466 if (!rc)
1450 fc_exch_mgr_delete_ep(ep); 1467 fc_exch_delete(ep);
1451 1468
1452 if (resp) 1469 if (resp)
1453 resp(sp, ERR_PTR(-FC_EX_CLOSED), arg); 1470 resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1454} 1471}
1455 1472
1456/* 1473/**
 1457 * Reset an exchange manager, releasing all sequences and exchanges. 1474 * fc_exch_pool_reset() - Resets a per-cpu exchange pool.
1458 * If sid is non-zero, reset only exchanges we source from that FID. 1475 * @lport: ptr to the local port
1459 * If did is non-zero, reset only exchanges destined to that FID. 1476 * @pool: ptr to the per cpu exches pool
1477 * @sid: source FC ID
1478 * @did: destination FC ID
1479 *
 1480 * Resets a per-cpu exchange pool, releasing all of its sequences
1481 * and exchanges. If sid is non-zero, then reset only exchanges
1482 * we sourced from that FID. If did is non-zero, reset only
1483 * exchanges destined to that FID.
1460 */ 1484 */
1461void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) 1485static void fc_exch_pool_reset(struct fc_lport *lport,
1486 struct fc_exch_pool *pool,
1487 u32 sid, u32 did)
1462{ 1488{
1463 struct fc_exch *ep; 1489 struct fc_exch *ep;
1464 struct fc_exch *next; 1490 struct fc_exch *next;
1465 struct fc_exch_mgr *mp = lp->emp;
1466 1491
1467 spin_lock_bh(&mp->em_lock); 1492 spin_lock_bh(&pool->lock);
1468restart: 1493restart:
1469 list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) { 1494 list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
1470 if ((sid == 0 || sid == ep->sid) && 1495 if ((lport == ep->lp) &&
1496 (sid == 0 || sid == ep->sid) &&
1471 (did == 0 || did == ep->did)) { 1497 (did == 0 || did == ep->did)) {
1472 fc_exch_hold(ep); 1498 fc_exch_hold(ep);
1473 spin_unlock_bh(&mp->em_lock); 1499 spin_unlock_bh(&pool->lock);
1474 1500
1475 fc_exch_reset(ep); 1501 fc_exch_reset(ep);
1476 1502
1477 fc_exch_release(ep); 1503 fc_exch_release(ep);
1478 spin_lock_bh(&mp->em_lock); 1504 spin_lock_bh(&pool->lock);
1479 1505
1480 /* 1506 /*
 1481 * must restart loop in case while lock was down 1507 * must restart loop in case while lock
1482 * multiple eps were released. 1508 * was down multiple eps were released.
1483 */ 1509 */
1484 goto restart; 1510 goto restart;
1485 } 1511 }
1486 } 1512 }
1487 spin_unlock_bh(&mp->em_lock); 1513 spin_unlock_bh(&pool->lock);
1514}
1515
1516/**
1517 * fc_exch_mgr_reset() - Resets all EMs of a lport
1518 * @lport: ptr to the local port
1519 * @sid: source FC ID
1520 * @did: destination FC ID
1521 *
 1522 * Reset all EMs of a lport, releasing all of their sequences and
1523 * exchanges. If sid is non-zero, then reset only exchanges
1524 * we sourced from that FID. If did is non-zero, reset only
1525 * exchanges destined to that FID.
1526 */
1527void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1528{
1529 struct fc_exch_mgr_anchor *ema;
1530 unsigned int cpu;
1531
1532 list_for_each_entry(ema, &lport->ema_list, ema_list) {
1533 for_each_possible_cpu(cpu)
1534 fc_exch_pool_reset(lport,
1535 per_cpu_ptr(ema->mp->pool, cpu),
1536 sid, did);
1537 }
1488} 1538}
1489EXPORT_SYMBOL(fc_exch_mgr_reset); 1539EXPORT_SYMBOL(fc_exch_mgr_reset);
1490 1540
@@ -1730,85 +1780,129 @@ reject:
1730 fc_frame_free(fp); 1780 fc_frame_free(fp);
1731} 1781}
1732 1782
1783struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
1784 struct fc_exch_mgr *mp,
1785 bool (*match)(struct fc_frame *))
1786{
1787 struct fc_exch_mgr_anchor *ema;
1788
1789 ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
1790 if (!ema)
1791 return ema;
1792
1793 ema->mp = mp;
1794 ema->match = match;
1795 /* add EM anchor to EM anchors list */
1796 list_add_tail(&ema->ema_list, &lport->ema_list);
1797 kref_get(&mp->kref);
1798 return ema;
1799}
1800EXPORT_SYMBOL(fc_exch_mgr_add);
1801
1802static void fc_exch_mgr_destroy(struct kref *kref)
1803{
1804 struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
1805
1806 mempool_destroy(mp->ep_pool);
1807 free_percpu(mp->pool);
1808 kfree(mp);
1809}
1810
1811void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
1812{
1813 /* remove EM anchor from EM anchors list */
1814 list_del(&ema->ema_list);
1815 kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
1816 kfree(ema);
1817}
1818EXPORT_SYMBOL(fc_exch_mgr_del);
1819
1733struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp, 1820struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
1734 enum fc_class class, 1821 enum fc_class class,
1735 u16 min_xid, u16 max_xid) 1822 u16 min_xid, u16 max_xid,
1823 bool (*match)(struct fc_frame *))
1736{ 1824{
1737 struct fc_exch_mgr *mp; 1825 struct fc_exch_mgr *mp;
1738 size_t len; 1826 u16 pool_exch_range;
1827 size_t pool_size;
1828 unsigned int cpu;
1829 struct fc_exch_pool *pool;
1739 1830
1740 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) { 1831 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
1832 (min_xid & fc_cpu_mask) != 0) {
1741 FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n", 1833 FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
1742 min_xid, max_xid); 1834 min_xid, max_xid);
1743 return NULL; 1835 return NULL;
1744 } 1836 }
1745 1837
1746 /* 1838 /*
1747 * Memory need for EM 1839 * allocate memory for EM
1748 */ 1840 */
1749#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2))) 1841 mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
1750 len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
1751 len += sizeof(struct fc_exch_mgr);
1752
1753 mp = kzalloc(len, GFP_ATOMIC);
1754 if (!mp) 1842 if (!mp)
1755 return NULL; 1843 return NULL;
1756 1844
1757 mp->class = class; 1845 mp->class = class;
1758 mp->total_exches = 0;
1759 mp->exches = (struct fc_exch **)(mp + 1);
1760 mp->lp = lp;
1761 /* adjust em exch xid range for offload */ 1846 /* adjust em exch xid range for offload */
1762 mp->min_xid = min_xid; 1847 mp->min_xid = min_xid;
1763 mp->max_xid = max_xid; 1848 mp->max_xid = max_xid;
1764 mp->last_xid = min_xid - 1;
1765 mp->max_read = 0;
1766 mp->last_read = 0;
1767 if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
1768 mp->max_read = lp->lro_xid;
1769 mp->last_read = min_xid - 1;
1770 mp->last_xid = mp->max_read;
1771 } else {
1772 /* disable lro if no xid control over read */
1773 lp->lro_enabled = 0;
1774 }
1775
1776 INIT_LIST_HEAD(&mp->ex_list);
1777 spin_lock_init(&mp->em_lock);
1778 1849
1779 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep); 1850 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
1780 if (!mp->ep_pool) 1851 if (!mp->ep_pool)
1781 goto free_mp; 1852 goto free_mp;
1782 1853
1854 /*
 1855 * Set up the per-cpu exchange pools, dividing the exchange id
 1856 * range equally across all cpus; each pool's exchange pointer
 1857 * array is allocated for its share of the range.
1858 */
1859 pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
1860 mp->pool_max_index = pool_exch_range - 1;
1861
1862 /*
1863 * Allocate and initialize per cpu exch pool
1864 */
1865 pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
1866 mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
1867 if (!mp->pool)
1868 goto free_mempool;
1869 for_each_possible_cpu(cpu) {
1870 pool = per_cpu_ptr(mp->pool, cpu);
1871 spin_lock_init(&pool->lock);
1872 INIT_LIST_HEAD(&pool->ex_list);
1873 }
1874
1875 kref_init(&mp->kref);
1876 if (!fc_exch_mgr_add(lp, mp, match)) {
1877 free_percpu(mp->pool);
1878 goto free_mempool;
1879 }
1880
1881 /*
1882 * Above kref_init() sets mp->kref to 1 and then
1883 * call to fc_exch_mgr_add incremented mp->kref again,
1884 * so adjust that extra increment.
1885 */
1886 kref_put(&mp->kref, fc_exch_mgr_destroy);
1783 return mp; 1887 return mp;
1784 1888
1889free_mempool:
1890 mempool_destroy(mp->ep_pool);
1785free_mp: 1891free_mp:
1786 kfree(mp); 1892 kfree(mp);
1787 return NULL; 1893 return NULL;
1788} 1894}
1789EXPORT_SYMBOL(fc_exch_mgr_alloc); 1895EXPORT_SYMBOL(fc_exch_mgr_alloc);
1790 1896
1791void fc_exch_mgr_free(struct fc_exch_mgr *mp) 1897void fc_exch_mgr_free(struct fc_lport *lport)
1792{ 1898{
1793 WARN_ON(!mp); 1899 struct fc_exch_mgr_anchor *ema, *next;
1794 /* 1900
1795 * The total exch count must be zero 1901 list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
1796 * before freeing exchange manager. 1902 fc_exch_mgr_del(ema);
1797 */
1798 WARN_ON(mp->total_exches != 0);
1799 mempool_destroy(mp->ep_pool);
1800 kfree(mp);
1801} 1903}
1802EXPORT_SYMBOL(fc_exch_mgr_free); 1904EXPORT_SYMBOL(fc_exch_mgr_free);
1803 1905
1804struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
1805{
1806 if (!lp || !lp->emp)
1807 return NULL;
1808
1809 return fc_exch_alloc(lp->emp, fp, 0);
1810}
1811EXPORT_SYMBOL(fc_exch_get);
1812 1906
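The reference counting around fc_exch_mgr_add()/fc_exch_mgr_del() above can be read as: every anchor owns one reference on the manager, the creator's initial kref_init() reference is dropped once the first anchor exists, and the manager is destroyed when the last anchor goes away. A minimal sketch with a plain counter standing in for struct kref:

#include <stdio.h>

struct mgr { int refs; };

static void mgr_get(struct mgr *m) { m->refs++; }

static void mgr_put(struct mgr *m)
{
        if (--m->refs == 0)
                printf("manager destroyed\n");  /* fc_exch_mgr_destroy() */
}

int main(void)
{
        struct mgr m = { .refs = 1 };   /* kref_init() in fc_exch_mgr_alloc */

        mgr_get(&m);    /* fc_exch_mgr_add(): the first anchor's reference */
        mgr_put(&m);    /* drop the creator's extra reference */
        mgr_put(&m);    /* fc_exch_mgr_del(): last anchor gone -> destroy */
        return 0;
}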
1813struct fc_seq *fc_exch_seq_send(struct fc_lport *lp, 1907struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
1814 struct fc_frame *fp, 1908 struct fc_frame *fp,
@@ -1823,7 +1917,7 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
1823 struct fc_frame_header *fh; 1917 struct fc_frame_header *fh;
1824 int rc = 1; 1918 int rc = 1;
1825 1919
1826 ep = lp->tt.exch_get(lp, fp); 1920 ep = fc_exch_alloc(lp, fp);
1827 if (!ep) { 1921 if (!ep) {
1828 fc_frame_free(fp); 1922 fc_frame_free(fp);
1829 return NULL; 1923 return NULL;
@@ -1843,7 +1937,8 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
1843 fc_exch_setup_hdr(ep, fp, ep->f_ctl); 1937 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1844 sp->cnt++; 1938 sp->cnt++;
1845 1939
1846 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); 1940 if (ep->xid <= lp->lro_xid)
1941 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
1847 1942
1848 if (unlikely(lp->tt.frame_send(lp, fp))) 1943 if (unlikely(lp->tt.frame_send(lp, fp)))
1849 goto err; 1944 goto err;
@@ -1860,7 +1955,7 @@ err:
1860 rc = fc_exch_done_locked(ep); 1955 rc = fc_exch_done_locked(ep);
1861 spin_unlock_bh(&ep->ex_lock); 1956 spin_unlock_bh(&ep->ex_lock);
1862 if (!rc) 1957 if (!rc)
1863 fc_exch_mgr_delete_ep(ep); 1958 fc_exch_delete(ep);
1864 return NULL; 1959 return NULL;
1865} 1960}
1866EXPORT_SYMBOL(fc_exch_seq_send); 1961EXPORT_SYMBOL(fc_exch_seq_send);
@@ -1868,24 +1963,44 @@ EXPORT_SYMBOL(fc_exch_seq_send);
1868/* 1963/*
1869 * Receive a frame 1964 * Receive a frame
1870 */ 1965 */
1871void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp, 1966void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
1872 struct fc_frame *fp)
1873{ 1967{
1874 struct fc_frame_header *fh = fc_frame_header_get(fp); 1968 struct fc_frame_header *fh = fc_frame_header_get(fp);
1875 u32 f_ctl; 1969 struct fc_exch_mgr_anchor *ema;
1970 u32 f_ctl, found = 0;
1971 u16 oxid;
1876 1972
1877 /* lport lock ? */ 1973 /* lport lock ? */
1878 if (!lp || !mp || (lp->state == LPORT_ST_NONE)) { 1974 if (!lp || lp->state == LPORT_ST_DISABLED) {
1879 FC_LPORT_DBG(lp, "Receiving frames for an lport that " 1975 FC_LPORT_DBG(lp, "Receiving frames for an lport that "
1880 "has not been initialized correctly\n"); 1976 "has not been initialized correctly\n");
1881 fc_frame_free(fp); 1977 fc_frame_free(fp);
1882 return; 1978 return;
1883 } 1979 }
1884 1980
1981 f_ctl = ntoh24(fh->fh_f_ctl);
1982 oxid = ntohs(fh->fh_ox_id);
1983 if (f_ctl & FC_FC_EX_CTX) {
1984 list_for_each_entry(ema, &lp->ema_list, ema_list) {
1985 if ((oxid >= ema->mp->min_xid) &&
1986 (oxid <= ema->mp->max_xid)) {
1987 found = 1;
1988 break;
1989 }
1990 }
1991
1992 if (!found) {
1993 FC_LPORT_DBG(lp, "Received response for out "
1994 "of range oxid:%hx\n", oxid);
1995 fc_frame_free(fp);
1996 return;
1997 }
1998 } else
1999 ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list);
2000
1885 /* 2001 /*
1886 * If frame is marked invalid, just drop it. 2002 * If frame is marked invalid, just drop it.
1887 */ 2003 */
1888 f_ctl = ntoh24(fh->fh_f_ctl);
1889 switch (fr_eof(fp)) { 2004 switch (fr_eof(fp)) {
1890 case FC_EOF_T: 2005 case FC_EOF_T:
1891 if (f_ctl & FC_FC_END_SEQ) 2006 if (f_ctl & FC_FC_END_SEQ)
@@ -1893,34 +2008,24 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
1893 /* fall through */ 2008 /* fall through */
1894 case FC_EOF_N: 2009 case FC_EOF_N:
1895 if (fh->fh_type == FC_TYPE_BLS) 2010 if (fh->fh_type == FC_TYPE_BLS)
1896 fc_exch_recv_bls(mp, fp); 2011 fc_exch_recv_bls(ema->mp, fp);
1897 else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == 2012 else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
1898 FC_FC_EX_CTX) 2013 FC_FC_EX_CTX)
1899 fc_exch_recv_seq_resp(mp, fp); 2014 fc_exch_recv_seq_resp(ema->mp, fp);
1900 else if (f_ctl & FC_FC_SEQ_CTX) 2015 else if (f_ctl & FC_FC_SEQ_CTX)
1901 fc_exch_recv_resp(mp, fp); 2016 fc_exch_recv_resp(ema->mp, fp);
1902 else 2017 else
1903 fc_exch_recv_req(lp, mp, fp); 2018 fc_exch_recv_req(lp, ema->mp, fp);
1904 break; 2019 break;
1905 default: 2020 default:
1906 FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp)); 2021 FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp));
1907 fc_frame_free(fp); 2022 fc_frame_free(fp);
1908 break;
1909 } 2023 }
1910} 2024}
1911EXPORT_SYMBOL(fc_exch_recv); 2025EXPORT_SYMBOL(fc_exch_recv);
1912 2026
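fc_exch_recv() above routes responder-context frames (FC_FC_EX_CTX set) to the EM whose xid range contains the frame's oxid and drops out-of-range frames; request frames simply go to the last anchor on the list. A sketch of the range lookup with hypothetical minimal types:

#include <stdio.h>

struct em {
        unsigned short min_xid, max_xid;
        const char *name;
};

static const struct em *pick_em(const struct em *ems, int n,
                                unsigned short oxid)
{
        int i;

        for (i = 0; i < n; i++)
                if (oxid >= ems[i].min_xid && oxid <= ems[i].max_xid)
                        return &ems[i];
        return NULL;    /* out-of-range oxid: frame is dropped */
}

int main(void)
{
        const struct em ems[] = {
                { 0x0000, 0x07ff, "offload EM" },
                { 0x0800, 0xffef, "default EM" },
        };
        const struct em *em = pick_em(ems, 2, 0x0812);

        printf("oxid 0x0812 -> %s\n", em ? em->name : "dropped");
        return 0;
}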
1913int fc_exch_init(struct fc_lport *lp) 2027int fc_exch_init(struct fc_lport *lp)
1914{ 2028{
1915 if (!lp->tt.exch_get) {
1916 /*
1917 * exch_put() should be NULL if
1918 * exch_get() is NULL
1919 */
1920 WARN_ON(lp->tt.exch_put);
1921 lp->tt.exch_get = fc_exch_get;
1922 }
1923
1924 if (!lp->tt.seq_start_next) 2029 if (!lp->tt.seq_start_next)
1925 lp->tt.seq_start_next = fc_seq_start_next; 2030 lp->tt.seq_start_next = fc_seq_start_next;
1926 2031
@@ -1942,6 +2047,28 @@ int fc_exch_init(struct fc_lport *lp)
1942 if (!lp->tt.seq_exch_abort) 2047 if (!lp->tt.seq_exch_abort)
1943 lp->tt.seq_exch_abort = fc_seq_exch_abort; 2048 lp->tt.seq_exch_abort = fc_seq_exch_abort;
1944 2049
2050 /*
 2051 * Initialize fc_cpu_mask and fc_cpu_order. fc_cpu_mask
 2052 * is nr_cpu_ids rounded up to the next power of two,
 2053 * minus one, and that power of two's exponent is stored
 2054 * in fc_cpu_order; both are later needed to map between
 2055 * an exch id and an exch array index in a per-cpu pool.
 2056 *
 2057 * The round-up is required to align fc_cpu_mask with
 2058 * the exchange id's lower bits, so that all incoming
 2059 * frames of an exchange are delivered to the same cpu
 2060 * on which the exchange originated, by a simple
 2061 * bitwise AND operation between fc_cpu_mask and the
 2062 * exchange id.
2063 */
2064 fc_cpu_mask = 1;
2065 fc_cpu_order = 0;
2066 while (fc_cpu_mask < nr_cpu_ids) {
2067 fc_cpu_mask <<= 1;
2068 fc_cpu_order++;
2069 }
2070 fc_cpu_mask--;
2071
1945 return 0; 2072 return 0;
1946} 2073}
1947EXPORT_SYMBOL(fc_exch_init); 2074EXPORT_SYMBOL(fc_exch_init);
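A standalone sketch of the xid <-> (cpu, pool index) mapping that fc_exch_init() above sets up and that fc_exch_em_alloc()/fc_exch_find() use: the low fc_cpu_order bits of an xid select the per-cpu pool and the remaining high bits index into it (min_xid must have its low bits clear, which fc_exch_mgr_alloc() checks). The cpu count below is an assumed example:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int nr_cpu_ids = 6;            /* assumed cpu count */
        unsigned short fc_cpu_mask = 1, fc_cpu_order = 0;
        unsigned short min_xid = 0x100;         /* low bits must be clear */
        unsigned short cpu = 5, index = 42, xid;

        /* round nr_cpu_ids up to a power of two, as fc_exch_init() does */
        while (fc_cpu_mask < nr_cpu_ids) {
                fc_cpu_mask <<= 1;
                fc_cpu_order++;
        }
        fc_cpu_mask--;          /* 6 cpus -> mask 7, order 3 */

        /* encode, as in fc_exch_em_alloc() */
        xid = (index << fc_cpu_order | cpu) + min_xid;

        /* decode, as in fc_exch_find() */
        assert((xid & fc_cpu_mask) == cpu);
        assert(((xid - min_xid) >> fc_cpu_order) == index);

        printf("xid %#x -> cpu %u index %u\n", xid, cpu, index);
        return 0;
}

Because the cpu lives in the xid's low bits, a response frame's oxid ANDed with fc_cpu_mask lands the frame back on the originating cpu's pool without any shared lookup table.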
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index e303e0d12c4b..59a4408b27b5 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -507,33 +507,6 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
507 f_ctl = FC_FC_REL_OFF; 507 f_ctl = FC_FC_REL_OFF;
508 WARN_ON(!seq); 508 WARN_ON(!seq);
509 509
510 /*
511 * If a get_page()/put_page() will fail, don't use sg lists
512 * in the fc_frame structure.
513 *
514 * The put_page() may be long after the I/O has completed
515 * in the case of FCoE, since the network driver does it
516 * via free_skb(). See the test in free_pages_check().
517 *
518 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
519 */
520 if (using_sg) {
521 for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
522 if (page_count(sg_page(sg)) == 0 ||
523 (sg_page(sg)->flags & (1 << PG_lru |
524 1 << PG_private |
525 1 << PG_locked |
526 1 << PG_active |
527 1 << PG_slab |
528 1 << PG_swapcache |
529 1 << PG_writeback |
530 1 << PG_reserved |
531 1 << PG_buddy))) {
532 using_sg = 0;
533 break;
534 }
535 }
536 }
537 sg = scsi_sglist(sc); 510 sg = scsi_sglist(sc);
538 511
539 while (remaining > 0 && sg) { 512 while (remaining > 0 && sg) {
@@ -569,8 +542,6 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
569 } 542 }
570 sg_bytes = min(tlen, sg->length - offset); 543 sg_bytes = min(tlen, sg->length - offset);
571 if (using_sg) { 544 if (using_sg) {
572 WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
573 FC_FRAME_SG_LEN);
574 get_page(sg_page(sg)); 545 get_page(sg_page(sg));
575 skb_fill_page_desc(fp_skb(fp), 546 skb_fill_page_desc(fp_skb(fp),
576 skb_shinfo(fp_skb(fp))->nr_frags, 547 skb_shinfo(fp_skb(fp))->nr_frags,
@@ -1337,7 +1308,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1337 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, 1308 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
1338 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS, 1309 fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
1339 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1310 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1340 if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp, 1311 if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp,
1341 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { 1312 fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
1342 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ 1313 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
1343 return; 1314 return;
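
The REC hunk above reflects an API change applied throughout this series: the elsct_send() transport-template hook now takes the 24-bit destination FC_ID (here rport->port_id) instead of a struct fc_rport pointer. A toy userspace model of the new convention; the names toy_template, toy_elsct_send, and the example FC_ID are illustrative assumptions, not libfc code:

	#include <stdio.h>

	typedef unsigned int u32;

	/* toy transport template: the hook takes a destination FC_ID */
	struct toy_template {
		int (*elsct_send)(u32 did, const char *els);
	};

	static int toy_elsct_send(u32 did, const char *els)
	{
		printf("sending %s to %06x\n", els, did);
		return 1;	/* nonzero: the exchange was started */
	}

	int main(void)
	{
		struct toy_template tt = { .elsct_send = toy_elsct_send };
		u32 rport_port_id = 0x0a0b0c;	/* assumed example FC_ID */

		if (!tt.elsct_send(rport_port_id, "REC"))
			printf("send failed; caller enters its error path\n");
		return 0;
	}
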
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 745fa5555d6a..bd2f77197447 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -113,7 +113,7 @@ static void fc_lport_enter_ready(struct fc_lport *);
113static void fc_lport_enter_logo(struct fc_lport *); 113static void fc_lport_enter_logo(struct fc_lport *);
114 114
115static const char *fc_lport_state_names[] = { 115static const char *fc_lport_state_names[] = {
116 [LPORT_ST_NONE] = "none", 116 [LPORT_ST_DISABLED] = "disabled",
117 [LPORT_ST_FLOGI] = "FLOGI", 117 [LPORT_ST_FLOGI] = "FLOGI",
118 [LPORT_ST_DNS] = "dNS", 118 [LPORT_ST_DNS] = "dNS",
119 [LPORT_ST_RPN_ID] = "RPN_ID", 119 [LPORT_ST_RPN_ID] = "RPN_ID",
@@ -133,57 +133,44 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
133/** 133/**
134 * fc_lport_rport_callback() - Event handler for rport events 134 * fc_lport_rport_callback() - Event handler for rport events
135 * @lport: The lport which is receiving the event 135 * @lport: The lport which is receiving the event
 136 * @rport: The rport which the event has occurred on	136 * @rdata: private remote port data
 137 * @event: The event that occurred	137 * @event: The event that occurred
138 * 138 *
139 * Locking Note: The rport lock should not be held when calling 139 * Locking Note: The rport lock should not be held when calling
140 * this function. 140 * this function.
141 */ 141 */
142static void fc_lport_rport_callback(struct fc_lport *lport, 142static void fc_lport_rport_callback(struct fc_lport *lport,
143 struct fc_rport *rport, 143 struct fc_rport_priv *rdata,
144 enum fc_rport_event event) 144 enum fc_rport_event event)
145{ 145{
146 FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event, 146 FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
147 rport->port_id); 147 rdata->ids.port_id);
148 148
149 mutex_lock(&lport->lp_mutex);
149 switch (event) { 150 switch (event) {
150 case RPORT_EV_CREATED: 151 case RPORT_EV_READY:
151 if (rport->port_id == FC_FID_DIR_SERV) { 152 if (lport->state == LPORT_ST_DNS) {
152 mutex_lock(&lport->lp_mutex); 153 lport->dns_rp = rdata;
153 if (lport->state == LPORT_ST_DNS) { 154 fc_lport_enter_rpn_id(lport);
154 lport->dns_rp = rport; 155 } else {
 155	fc_lport_enter_rpn_id(lport);	156	FC_LPORT_DBG(lport, "Received a READY event "
 156	} else {	157	"on port (%6x) for the directory "
 157	FC_LPORT_DBG(lport, "Received a CREATED event "	158	"server, but the lport is not "
158 "on port (%6x) for the directory " 159 "in the DNS state, it's in the "
159 "server, but the lport is not " 160 "%d state", rdata->ids.port_id,
160 "in the DNS state, it's in the " 161 lport->state);
161 "%d state", rport->port_id, 162 lport->tt.rport_logoff(rdata);
162 lport->state); 163 }
163 lport->tt.rport_logoff(rport);
164 }
165 mutex_unlock(&lport->lp_mutex);
166 } else
167 FC_LPORT_DBG(lport, "Received an event for port (%6x) "
168 "which is not the directory server\n",
169 rport->port_id);
170 break; 164 break;
171 case RPORT_EV_LOGO: 165 case RPORT_EV_LOGO:
172 case RPORT_EV_FAILED: 166 case RPORT_EV_FAILED:
173 case RPORT_EV_STOP: 167 case RPORT_EV_STOP:
174 if (rport->port_id == FC_FID_DIR_SERV) { 168 lport->dns_rp = NULL;
175 mutex_lock(&lport->lp_mutex);
176 lport->dns_rp = NULL;
177 mutex_unlock(&lport->lp_mutex);
178
179 } else
180 FC_LPORT_DBG(lport, "Received an event for port (%6x) "
181 "which is not the directory server\n",
182 rport->port_id);
183 break; 169 break;
184 case RPORT_EV_NONE: 170 case RPORT_EV_NONE:
185 break; 171 break;
186 } 172 }
173 mutex_unlock(&lport->lp_mutex);
187} 174}
188 175
189/** 176/**
@@ -211,20 +198,13 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
211 u32 remote_fid, u64 remote_wwpn, 198 u32 remote_fid, u64 remote_wwpn,
212 u64 remote_wwnn) 199 u64 remote_wwnn)
213{ 200{
214 struct fc_disc_port dp; 201 mutex_lock(&lport->disc.disc_mutex);
215 202 if (lport->ptp_rp)
216 dp.lp = lport;
217 dp.ids.port_id = remote_fid;
218 dp.ids.port_name = remote_wwpn;
219 dp.ids.node_name = remote_wwnn;
220 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
221
222 if (lport->ptp_rp) {
223 lport->tt.rport_logoff(lport->ptp_rp); 203 lport->tt.rport_logoff(lport->ptp_rp);
224 lport->ptp_rp = NULL; 204 lport->ptp_rp = lport->tt.rport_create(lport, remote_fid);
225 } 205 lport->ptp_rp->ids.port_name = remote_wwpn;
226 206 lport->ptp_rp->ids.node_name = remote_wwnn;
227 lport->ptp_rp = lport->tt.rport_create(&dp); 207 mutex_unlock(&lport->disc.disc_mutex);
228 208
229 lport->tt.rport_login(lport->ptp_rp); 209 lport->tt.rport_login(lport->ptp_rp);
230 210
@@ -472,56 +452,6 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
472} 452}
473 453
474/** 454/**
475 * fc_lport_recv_adisc_req() - Handle received Address Discovery Request
 476 * @lport: Fibre Channel local port receiving the ADISC
477 * @sp: current sequence in the ADISC exchange
478 * @fp: ADISC request frame
479 *
480 * Locking Note: The lport lock is expected to be held before calling
481 * this function.
482 */
483static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
484 struct fc_lport *lport)
485{
486 struct fc_frame *fp;
487 struct fc_exch *ep = fc_seq_exch(sp);
488 struct fc_els_adisc *req, *rp;
489 struct fc_seq_els_data rjt_data;
490 size_t len;
491 u32 f_ctl;
492
493 FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n",
494 fc_lport_state(lport));
495
496 req = fc_frame_payload_get(in_fp, sizeof(*req));
497 if (!req) {
498 rjt_data.fp = NULL;
499 rjt_data.reason = ELS_RJT_LOGIC;
500 rjt_data.explan = ELS_EXPL_NONE;
501 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
502 } else {
503 len = sizeof(*rp);
504 fp = fc_frame_alloc(lport, len);
505 if (fp) {
506 rp = fc_frame_payload_get(fp, len);
507 memset(rp, 0, len);
508 rp->adisc_cmd = ELS_LS_ACC;
509 rp->adisc_wwpn = htonll(lport->wwpn);
510 rp->adisc_wwnn = htonll(lport->wwnn);
511 hton24(rp->adisc_port_id,
512 fc_host_port_id(lport->host));
513 sp = lport->tt.seq_start_next(sp);
514 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
515 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
516 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
517 FC_TYPE_ELS, f_ctl, 0);
518 lport->tt.seq_send(lport, sp, fp);
519 }
520 }
521 fc_frame_free(in_fp);
522}
523
524/**
525 * fc_lport_recv_logo_req() - Handle received fabric LOGO request 455 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
 526 * @lport: Fibre Channel local port receiving the LOGO	456 * @lport: Fibre Channel local port receiving the LOGO
527 * @sp: current sequence in the LOGO exchange 457 * @sp: current sequence in the LOGO exchange
@@ -550,7 +480,7 @@ int fc_fabric_login(struct fc_lport *lport)
550 int rc = -1; 480 int rc = -1;
551 481
552 mutex_lock(&lport->lp_mutex); 482 mutex_lock(&lport->lp_mutex);
553 if (lport->state == LPORT_ST_NONE) { 483 if (lport->state == LPORT_ST_DISABLED) {
554 fc_lport_enter_reset(lport); 484 fc_lport_enter_reset(lport);
555 rc = 0; 485 rc = 0;
556 } 486 }
@@ -637,12 +567,13 @@ EXPORT_SYMBOL(fc_fabric_logoff);
637int fc_lport_destroy(struct fc_lport *lport) 567int fc_lport_destroy(struct fc_lport *lport)
638{ 568{
639 mutex_lock(&lport->lp_mutex); 569 mutex_lock(&lport->lp_mutex);
640 lport->state = LPORT_ST_NONE; 570 lport->state = LPORT_ST_DISABLED;
641 lport->link_up = 0; 571 lport->link_up = 0;
642 lport->tt.frame_send = fc_frame_drop; 572 lport->tt.frame_send = fc_frame_drop;
643 mutex_unlock(&lport->lp_mutex); 573 mutex_unlock(&lport->lp_mutex);
644 574
645 lport->tt.fcp_abort_io(lport); 575 lport->tt.fcp_abort_io(lport);
576 lport->tt.disc_stop_final(lport);
646 lport->tt.exch_mgr_reset(lport, 0, 0); 577 lport->tt.exch_mgr_reset(lport, 0, 0);
647 return 0; 578 return 0;
648} 579}
@@ -722,7 +653,8 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
722 653
723 fc_lport_state_enter(lport, LPORT_ST_READY); 654 fc_lport_state_enter(lport, LPORT_ST_READY);
724 655
725 lport->tt.disc_start(fc_lport_disc_callback, lport); 656 if (!lport->ptp_rp)
657 lport->tt.disc_start(fc_lport_disc_callback, lport);
726} 658}
727 659
728/** 660/**
@@ -808,8 +740,6 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
808 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn, 740 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
809 get_unaligned_be64(&flp->fl_wwnn)); 741 get_unaligned_be64(&flp->fl_wwnn));
810 742
811 lport->tt.disc_start(fc_lport_disc_callback, lport);
812
813out: 743out:
814 sp = fr_seq(rx_fp); 744 sp = fr_seq(rx_fp);
815 fc_frame_free(rx_fp); 745 fc_frame_free(rx_fp);
@@ -832,10 +762,6 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
832{ 762{
833 struct fc_frame_header *fh = fc_frame_header_get(fp); 763 struct fc_frame_header *fh = fc_frame_header_get(fp);
834 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *); 764 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
835 struct fc_rport *rport;
836 u32 s_id;
837 u32 d_id;
838 struct fc_seq_els_data rjt_data;
839 765
840 mutex_lock(&lport->lp_mutex); 766 mutex_lock(&lport->lp_mutex);
841 767
@@ -844,11 +770,14 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
844 * RSCN here. These don't require a session. 770 * RSCN here. These don't require a session.
845 * Even if we had a session, it might not be ready. 771 * Even if we had a session, it might not be ready.
846 */ 772 */
847 if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) { 773 if (!lport->link_up)
774 fc_frame_free(fp);
775 else if (fh->fh_type == FC_TYPE_ELS &&
776 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
848 /* 777 /*
849 * Check opcode. 778 * Check opcode.
850 */ 779 */
851 recv = NULL; 780 recv = lport->tt.rport_recv_req;
852 switch (fc_frame_payload_op(fp)) { 781 switch (fc_frame_payload_op(fp)) {
853 case ELS_FLOGI: 782 case ELS_FLOGI:
854 recv = fc_lport_recv_flogi_req; 783 recv = fc_lport_recv_flogi_req;
@@ -870,34 +799,9 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
870 case ELS_RNID: 799 case ELS_RNID:
871 recv = fc_lport_recv_rnid_req; 800 recv = fc_lport_recv_rnid_req;
872 break; 801 break;
873 case ELS_ADISC:
874 recv = fc_lport_recv_adisc_req;
875 break;
876 } 802 }
877 803
878 if (recv) 804 recv(sp, fp, lport);
879 recv(sp, fp, lport);
880 else {
881 /*
882 * Find session.
883 * If this is a new incoming PLOGI, we won't find it.
884 */
885 s_id = ntoh24(fh->fh_s_id);
886 d_id = ntoh24(fh->fh_d_id);
887
888 rport = lport->tt.rport_lookup(lport, s_id);
889 if (rport)
890 lport->tt.rport_recv_req(sp, fp, rport);
891 else {
892 rjt_data.fp = NULL;
893 rjt_data.reason = ELS_RJT_UNAB;
894 rjt_data.explan = ELS_EXPL_NONE;
895 lport->tt.seq_els_rsp_send(sp,
896 ELS_LS_RJT,
897 &rjt_data);
898 fc_frame_free(fp);
899 }
900 }
901 } else { 805 } else {
902 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n", 806 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
903 fr_eof(fp)); 807 fr_eof(fp));
@@ -930,38 +834,61 @@ int fc_lport_reset(struct fc_lport *lport)
930EXPORT_SYMBOL(fc_lport_reset); 834EXPORT_SYMBOL(fc_lport_reset);
931 835
932/** 836/**
933 * fc_rport_enter_reset() - Reset the local port 837 * fc_lport_reset_locked() - Reset the local port
934 * @lport: Fibre Channel local port to be reset 838 * @lport: Fibre Channel local port to be reset
935 * 839 *
936 * Locking Note: The lport lock is expected to be held before calling 840 * Locking Note: The lport lock is expected to be held before calling
937 * this routine. 841 * this routine.
938 */ 842 */
939static void fc_lport_enter_reset(struct fc_lport *lport) 843static void fc_lport_reset_locked(struct fc_lport *lport)
940{ 844{
941 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
942 fc_lport_state(lport));
943
944 fc_lport_state_enter(lport, LPORT_ST_RESET);
945
946 if (lport->dns_rp) 845 if (lport->dns_rp)
947 lport->tt.rport_logoff(lport->dns_rp); 846 lport->tt.rport_logoff(lport->dns_rp);
948 847
949 if (lport->ptp_rp) { 848 lport->ptp_rp = NULL;
950 lport->tt.rport_logoff(lport->ptp_rp);
951 lport->ptp_rp = NULL;
952 }
953 849
954 lport->tt.disc_stop(lport); 850 lport->tt.disc_stop(lport);
955 851
956 lport->tt.exch_mgr_reset(lport, 0, 0); 852 lport->tt.exch_mgr_reset(lport, 0, 0);
957 fc_host_fabric_name(lport->host) = 0; 853 fc_host_fabric_name(lport->host) = 0;
958 fc_host_port_id(lport->host) = 0; 854 fc_host_port_id(lport->host) = 0;
855}
959 856
857/**
858 * fc_lport_enter_reset() - Reset the local port
859 * @lport: Fibre Channel local port to be reset
860 *
861 * Locking Note: The lport lock is expected to be held before calling
862 * this routine.
863 */
864static void fc_lport_enter_reset(struct fc_lport *lport)
865{
866 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
867 fc_lport_state(lport));
868
869 fc_lport_state_enter(lport, LPORT_ST_RESET);
870 fc_lport_reset_locked(lport);
960 if (lport->link_up) 871 if (lport->link_up)
961 fc_lport_enter_flogi(lport); 872 fc_lport_enter_flogi(lport);
962} 873}
963 874
964/** 875/**
 876 * fc_lport_enter_disabled() - Disable the local port
877 * @lport: Fibre Channel local port to be reset
878 *
879 * Locking Note: The lport lock is expected to be held before calling
880 * this routine.
881 */
882static void fc_lport_enter_disabled(struct fc_lport *lport)
883{
884 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
885 fc_lport_state(lport));
886
887 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
888 fc_lport_reset_locked(lport);
889}
890
891/**
965 * fc_lport_error() - Handler for any errors 892 * fc_lport_error() - Handler for any errors
966 * @lport: The fc_lport object 893 * @lport: The fc_lport object
967 * @fp: The frame pointer 894 * @fp: The frame pointer
@@ -992,7 +919,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
992 schedule_delayed_work(&lport->retry_work, delay); 919 schedule_delayed_work(&lport->retry_work, delay);
993 } else { 920 } else {
994 switch (lport->state) { 921 switch (lport->state) {
995 case LPORT_ST_NONE: 922 case LPORT_ST_DISABLED:
996 case LPORT_ST_READY: 923 case LPORT_ST_READY:
997 case LPORT_ST_RESET: 924 case LPORT_ST_RESET:
998 case LPORT_ST_RPN_ID: 925 case LPORT_ST_RPN_ID:
@@ -1026,13 +953,13 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1026 struct fc_frame_header *fh; 953 struct fc_frame_header *fh;
1027 struct fc_ct_hdr *ct; 954 struct fc_ct_hdr *ct;
1028 955
956 FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp));
957
1029 if (fp == ERR_PTR(-FC_EX_CLOSED)) 958 if (fp == ERR_PTR(-FC_EX_CLOSED))
1030 return; 959 return;
1031 960
1032 mutex_lock(&lport->lp_mutex); 961 mutex_lock(&lport->lp_mutex);
1033 962
1034 FC_LPORT_DBG(lport, "Received a RFT_ID response\n");
1035
1036 if (lport->state != LPORT_ST_RFT_ID) { 963 if (lport->state != LPORT_ST_RFT_ID) {
1037 FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state " 964 FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
1038 "%s\n", fc_lport_state(lport)); 965 "%s\n", fc_lport_state(lport));
@@ -1080,13 +1007,13 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1080 struct fc_frame_header *fh; 1007 struct fc_frame_header *fh;
1081 struct fc_ct_hdr *ct; 1008 struct fc_ct_hdr *ct;
1082 1009
1010 FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp));
1011
1083 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1012 if (fp == ERR_PTR(-FC_EX_CLOSED))
1084 return; 1013 return;
1085 1014
1086 mutex_lock(&lport->lp_mutex); 1015 mutex_lock(&lport->lp_mutex);
1087 1016
1088 FC_LPORT_DBG(lport, "Received a RPN_ID response\n");
1089
1090 if (lport->state != LPORT_ST_RPN_ID) { 1017 if (lport->state != LPORT_ST_RPN_ID) {
1091 FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state " 1018 FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
1092 "%s\n", fc_lport_state(lport)); 1019 "%s\n", fc_lport_state(lport));
@@ -1132,13 +1059,13 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1132 struct fc_lport *lport = lp_arg; 1059 struct fc_lport *lport = lp_arg;
1133 u8 op; 1060 u8 op;
1134 1061
1062 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1063
1135 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1064 if (fp == ERR_PTR(-FC_EX_CLOSED))
1136 return; 1065 return;
1137 1066
1138 mutex_lock(&lport->lp_mutex); 1067 mutex_lock(&lport->lp_mutex);
1139 1068
1140 FC_LPORT_DBG(lport, "Received a SCR response\n");
1141
1142 if (lport->state != LPORT_ST_SCR) { 1069 if (lport->state != LPORT_ST_SCR) {
1143 FC_LPORT_DBG(lport, "Received a SCR response, but in state " 1070 FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1144 "%s\n", fc_lport_state(lport)); 1071 "%s\n", fc_lport_state(lport));
@@ -1186,7 +1113,7 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
1186 return; 1113 return;
1187 } 1114 }
1188 1115
1189 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR, 1116 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1190 fc_lport_scr_resp, lport, lport->e_d_tov)) 1117 fc_lport_scr_resp, lport, lport->e_d_tov))
1191 fc_lport_error(lport, fp); 1118 fc_lport_error(lport, fp);
1192} 1119}
@@ -1227,7 +1154,7 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
1227 return; 1154 return;
1228 } 1155 }
1229 1156
1230 if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID, 1157 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID,
1231 fc_lport_rft_id_resp, 1158 fc_lport_rft_id_resp,
1232 lport, lport->e_d_tov)) 1159 lport, lport->e_d_tov))
1233 fc_lport_error(lport, fp); 1160 fc_lport_error(lport, fp);
@@ -1256,7 +1183,7 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
1256 return; 1183 return;
1257 } 1184 }
1258 1185
1259 if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID, 1186 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID,
1260 fc_lport_rpn_id_resp, 1187 fc_lport_rpn_id_resp,
1261 lport, lport->e_d_tov)) 1188 lport, lport->e_d_tov))
1262 fc_lport_error(lport, fp); 1189 fc_lport_error(lport, fp);
@@ -1275,28 +1202,21 @@ static struct fc_rport_operations fc_lport_rport_ops = {
1275 */ 1202 */
1276static void fc_lport_enter_dns(struct fc_lport *lport) 1203static void fc_lport_enter_dns(struct fc_lport *lport)
1277{ 1204{
1278 struct fc_rport *rport; 1205 struct fc_rport_priv *rdata;
1279 struct fc_rport_libfc_priv *rdata;
1280 struct fc_disc_port dp;
1281
1282 dp.ids.port_id = FC_FID_DIR_SERV;
1283 dp.ids.port_name = -1;
1284 dp.ids.node_name = -1;
1285 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
1286 dp.lp = lport;
1287 1206
1288 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n", 1207 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1289 fc_lport_state(lport)); 1208 fc_lport_state(lport));
1290 1209
1291 fc_lport_state_enter(lport, LPORT_ST_DNS); 1210 fc_lport_state_enter(lport, LPORT_ST_DNS);
1292 1211
1293 rport = lport->tt.rport_create(&dp); 1212 mutex_lock(&lport->disc.disc_mutex);
1294 if (!rport) 1213 rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
1214 mutex_unlock(&lport->disc.disc_mutex);
1215 if (!rdata)
1295 goto err; 1216 goto err;
1296 1217
1297 rdata = rport->dd_data;
1298 rdata->ops = &fc_lport_rport_ops; 1218 rdata->ops = &fc_lport_rport_ops;
1299 lport->tt.rport_login(rport); 1219 lport->tt.rport_login(rdata);
1300 return; 1220 return;
1301 1221
1302err: 1222err:
@@ -1316,7 +1236,7 @@ static void fc_lport_timeout(struct work_struct *work)
1316 mutex_lock(&lport->lp_mutex); 1236 mutex_lock(&lport->lp_mutex);
1317 1237
1318 switch (lport->state) { 1238 switch (lport->state) {
1319 case LPORT_ST_NONE: 1239 case LPORT_ST_DISABLED:
1320 case LPORT_ST_READY: 1240 case LPORT_ST_READY:
1321 case LPORT_ST_RESET: 1241 case LPORT_ST_RESET:
1322 WARN_ON(1); 1242 WARN_ON(1);
@@ -1360,13 +1280,13 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1360 struct fc_lport *lport = lp_arg; 1280 struct fc_lport *lport = lp_arg;
1361 u8 op; 1281 u8 op;
1362 1282
1283 FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1284
1363 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1285 if (fp == ERR_PTR(-FC_EX_CLOSED))
1364 return; 1286 return;
1365 1287
1366 mutex_lock(&lport->lp_mutex); 1288 mutex_lock(&lport->lp_mutex);
1367 1289
1368 FC_LPORT_DBG(lport, "Received a LOGO response\n");
1369
1370 if (lport->state != LPORT_ST_LOGO) { 1290 if (lport->state != LPORT_ST_LOGO) {
1371 FC_LPORT_DBG(lport, "Received a LOGO response, but in state " 1291 FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1372 "%s\n", fc_lport_state(lport)); 1292 "%s\n", fc_lport_state(lport));
@@ -1382,7 +1302,7 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1382 1302
1383 op = fc_frame_payload_op(fp); 1303 op = fc_frame_payload_op(fp);
1384 if (op == ELS_LS_ACC) 1304 if (op == ELS_LS_ACC)
1385 fc_lport_enter_reset(lport); 1305 fc_lport_enter_disabled(lport);
1386 else 1306 else
1387 fc_lport_error(lport, fp); 1307 fc_lport_error(lport, fp);
1388 1308
@@ -1415,8 +1335,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
1415 return; 1335 return;
1416 } 1336 }
1417 1337
1418 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp, 1338 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1419 lport, lport->e_d_tov)) 1339 fc_lport_logo_resp, lport, lport->e_d_tov))
1420 fc_lport_error(lport, fp); 1340 fc_lport_error(lport, fp);
1421} 1341}
1422 1342
@@ -1442,13 +1362,13 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1442 unsigned int e_d_tov; 1362 unsigned int e_d_tov;
1443 u16 mfs; 1363 u16 mfs;
1444 1364
1365 FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1366
1445 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1367 if (fp == ERR_PTR(-FC_EX_CLOSED))
1446 return; 1368 return;
1447 1369
1448 mutex_lock(&lport->lp_mutex); 1370 mutex_lock(&lport->lp_mutex);
1449 1371
1450 FC_LPORT_DBG(lport, "Received a FLOGI response\n");
1451
1452 if (lport->state != LPORT_ST_FLOGI) { 1372 if (lport->state != LPORT_ST_FLOGI) {
1453 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state " 1373 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1454 "%s\n", fc_lport_state(lport)); 1374 "%s\n", fc_lport_state(lport));
@@ -1501,14 +1421,6 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1501 fc_lport_enter_dns(lport); 1421 fc_lport_enter_dns(lport);
1502 } 1422 }
1503 } 1423 }
1504
1505 if (flp) {
1506 csp_flags = ntohs(flp->fl_csp.sp_features);
1507 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1508 lport->tt.disc_start(fc_lport_disc_callback,
1509 lport);
1510 }
1511 }
1512 } else { 1424 } else {
1513 FC_LPORT_DBG(lport, "Bad FLOGI response\n"); 1425 FC_LPORT_DBG(lport, "Bad FLOGI response\n");
1514 } 1426 }
@@ -1539,7 +1451,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
1539 if (!fp) 1451 if (!fp)
1540 return fc_lport_error(lport, fp); 1452 return fc_lport_error(lport, fp);
1541 1453
1542 if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI, 1454 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
1543 fc_lport_flogi_resp, lport, lport->e_d_tov)) 1455 fc_lport_flogi_resp, lport, lport->e_d_tov))
1544 fc_lport_error(lport, fp); 1456 fc_lport_error(lport, fp);
1545} 1457}
@@ -1550,7 +1462,7 @@ int fc_lport_config(struct fc_lport *lport)
1550 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); 1462 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1551 mutex_init(&lport->lp_mutex); 1463 mutex_init(&lport->lp_mutex);
1552 1464
1553 fc_lport_state_enter(lport, LPORT_ST_NONE); 1465 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1554 1466
1555 fc_lport_add_fc4_type(lport, FC_TYPE_FCP); 1467 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1556 fc_lport_add_fc4_type(lport, FC_TYPE_CT); 1468 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
@@ -1588,6 +1500,7 @@ int fc_lport_init(struct fc_lport *lport)
1588 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) 1500 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1589 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; 1501 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1590 1502
1503 INIT_LIST_HEAD(&lport->ema_list);
1591 return 0; 1504 return 0;
1592} 1505}
1593EXPORT_SYMBOL(fc_lport_init); 1506EXPORT_SYMBOL(fc_lport_init);
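
The reset rework above factors the common teardown into fc_lport_reset_locked(), shared by fc_lport_enter_reset() (which restarts FLOGI when the link is up) and the new fc_lport_enter_disabled() (which leaves the port down, e.g. after a LOGO accept). A condensed userspace sketch of that structure, with assumed names and the teardown reduced to a message:

	#include <stdio.h>

	enum lport_state { ST_DISABLED, ST_RESET, ST_FLOGI };

	struct lport {
		enum lport_state state;
		int link_up;
	};

	/* common teardown shared by both entry points (condensed) */
	static void lport_reset_locked(struct lport *lp)
	{
		printf("logoff dNS rport, drop ptp rport, stop disc, reset exchanges\n");
	}

	static void lport_enter_reset(struct lport *lp)
	{
		lp->state = ST_RESET;
		lport_reset_locked(lp);
		if (lp->link_up) {
			lp->state = ST_FLOGI;	/* relogin while the link is up */
			printf("re-entering FLOGI\n");
		}
	}

	static void lport_enter_disabled(struct lport *lp)
	{
		lp->state = ST_DISABLED;	/* stay down, e.g. after LOGO ACC */
		lport_reset_locked(lp);
	}

	int main(void)
	{
		struct lport lp = { .state = ST_FLOGI, .link_up = 1 };

		lport_enter_reset(&lp);		/* tears down, then retries FLOGI */
		lp.link_up = 0;
		lport_enter_disabled(&lp);	/* tears down and stays disabled */
		return 0;
	}
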
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 7162385f52eb..03ea6748e7ee 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -57,94 +57,114 @@
57 57
58struct workqueue_struct *rport_event_queue; 58struct workqueue_struct *rport_event_queue;
59 59
60static void fc_rport_enter_plogi(struct fc_rport *); 60static void fc_rport_enter_plogi(struct fc_rport_priv *);
61static void fc_rport_enter_prli(struct fc_rport *); 61static void fc_rport_enter_prli(struct fc_rport_priv *);
62static void fc_rport_enter_rtv(struct fc_rport *); 62static void fc_rport_enter_rtv(struct fc_rport_priv *);
63static void fc_rport_enter_ready(struct fc_rport *); 63static void fc_rport_enter_ready(struct fc_rport_priv *);
64static void fc_rport_enter_logo(struct fc_rport *); 64static void fc_rport_enter_logo(struct fc_rport_priv *);
65 65static void fc_rport_enter_adisc(struct fc_rport_priv *);
66static void fc_rport_recv_plogi_req(struct fc_rport *, 66
67static void fc_rport_recv_plogi_req(struct fc_lport *,
67 struct fc_seq *, struct fc_frame *); 68 struct fc_seq *, struct fc_frame *);
68static void fc_rport_recv_prli_req(struct fc_rport *, 69static void fc_rport_recv_prli_req(struct fc_rport_priv *,
69 struct fc_seq *, struct fc_frame *); 70 struct fc_seq *, struct fc_frame *);
70static void fc_rport_recv_prlo_req(struct fc_rport *, 71static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
71 struct fc_seq *, struct fc_frame *); 72 struct fc_seq *, struct fc_frame *);
72static void fc_rport_recv_logo_req(struct fc_rport *, 73static void fc_rport_recv_logo_req(struct fc_lport *,
73 struct fc_seq *, struct fc_frame *); 74 struct fc_seq *, struct fc_frame *);
74static void fc_rport_timeout(struct work_struct *); 75static void fc_rport_timeout(struct work_struct *);
75static void fc_rport_error(struct fc_rport *, struct fc_frame *); 76static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
76static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *); 77static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
77static void fc_rport_work(struct work_struct *); 78static void fc_rport_work(struct work_struct *);
78 79
79static const char *fc_rport_state_names[] = { 80static const char *fc_rport_state_names[] = {
80 [RPORT_ST_NONE] = "None",
81 [RPORT_ST_INIT] = "Init", 81 [RPORT_ST_INIT] = "Init",
82 [RPORT_ST_PLOGI] = "PLOGI", 82 [RPORT_ST_PLOGI] = "PLOGI",
83 [RPORT_ST_PRLI] = "PRLI", 83 [RPORT_ST_PRLI] = "PRLI",
84 [RPORT_ST_RTV] = "RTV", 84 [RPORT_ST_RTV] = "RTV",
85 [RPORT_ST_READY] = "Ready", 85 [RPORT_ST_READY] = "Ready",
86 [RPORT_ST_LOGO] = "LOGO", 86 [RPORT_ST_LOGO] = "LOGO",
87 [RPORT_ST_ADISC] = "ADISC",
88 [RPORT_ST_DELETE] = "Delete",
87}; 89};
88 90
89static void fc_rport_rogue_destroy(struct device *dev) 91/**
92 * fc_rport_lookup() - lookup a remote port by port_id
93 * @lport: Fibre Channel host port instance
94 * @port_id: remote port port_id to match
95 */
96static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
97 u32 port_id)
90{ 98{
91 struct fc_rport *rport = dev_to_rport(dev); 99 struct fc_rport_priv *rdata;
92 FC_RPORT_DBG(rport, "Destroying rogue rport\n"); 100
93 kfree(rport); 101 list_for_each_entry(rdata, &lport->disc.rports, peers)
102 if (rdata->ids.port_id == port_id &&
103 rdata->rp_state != RPORT_ST_DELETE)
104 return rdata;
105 return NULL;
94} 106}
95 107
96struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp) 108/**
109 * fc_rport_create() - Create a new remote port
110 * @lport: The local port that the new remote port is for
111 * @port_id: The port ID for the new remote port
112 *
113 * Locking note: must be called with the disc_mutex held.
114 */
115static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
116 u32 port_id)
97{ 117{
98 struct fc_rport *rport; 118 struct fc_rport_priv *rdata;
99 struct fc_rport_libfc_priv *rdata;
100 rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
101 119
102 if (!rport) 120 rdata = lport->tt.rport_lookup(lport, port_id);
103 return NULL; 121 if (rdata)
122 return rdata;
104 123
105 rdata = RPORT_TO_PRIV(rport); 124 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
125 if (!rdata)
126 return NULL;
106 127
107 rport->dd_data = rdata; 128 rdata->ids.node_name = -1;
108 rport->port_id = dp->ids.port_id; 129 rdata->ids.port_name = -1;
109 rport->port_name = dp->ids.port_name; 130 rdata->ids.port_id = port_id;
110 rport->node_name = dp->ids.node_name; 131 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
111 rport->roles = dp->ids.roles;
112 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
113 /*
114 * Note: all this libfc rogue rport code will be removed for
 115	 * upstream so it's fine that this is really ugly and hacky right now.
116 */
117 device_initialize(&rport->dev);
118 rport->dev.release = fc_rport_rogue_destroy;
119 132
133 kref_init(&rdata->kref);
120 mutex_init(&rdata->rp_mutex); 134 mutex_init(&rdata->rp_mutex);
121 rdata->local_port = dp->lp; 135 rdata->local_port = lport;
122 rdata->trans_state = FC_PORTSTATE_ROGUE;
123 rdata->rp_state = RPORT_ST_INIT; 136 rdata->rp_state = RPORT_ST_INIT;
124 rdata->event = RPORT_EV_NONE; 137 rdata->event = RPORT_EV_NONE;
125 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED; 138 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
126 rdata->ops = NULL; 139 rdata->e_d_tov = lport->e_d_tov;
127 rdata->e_d_tov = dp->lp->e_d_tov; 140 rdata->r_a_tov = lport->r_a_tov;
128 rdata->r_a_tov = dp->lp->r_a_tov; 141 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
129 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); 142 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
130 INIT_WORK(&rdata->event_work, fc_rport_work); 143 INIT_WORK(&rdata->event_work, fc_rport_work);
131 /* 144 if (port_id != FC_FID_DIR_SERV)
132 * For good measure, but not necessary as we should only 145 list_add(&rdata->peers, &lport->disc.rports);
133 * add REAL rport to the lport list. 146 return rdata;
134 */ 147}
135 INIT_LIST_HEAD(&rdata->peers); 148
149/**
150 * fc_rport_destroy() - free a remote port after last reference is released.
151 * @kref: pointer to kref inside struct fc_rport_priv
152 */
153static void fc_rport_destroy(struct kref *kref)
154{
155 struct fc_rport_priv *rdata;
136 156
137 return rport; 157 rdata = container_of(kref, struct fc_rport_priv, kref);
158 kfree(rdata);
138} 159}
139 160
140/** 161/**
141 * fc_rport_state() - return a string for the state the rport is in 162 * fc_rport_state() - return a string for the state the rport is in
142 * @rport: The rport whose state we want to get a string for 163 * @rdata: remote port private data
143 */ 164 */
144static const char *fc_rport_state(struct fc_rport *rport) 165static const char *fc_rport_state(struct fc_rport_priv *rdata)
145{ 166{
146 const char *cp; 167 const char *cp;
147 struct fc_rport_libfc_priv *rdata = rport->dd_data;
148 168
149 cp = fc_rport_state_names[rdata->rp_state]; 169 cp = fc_rport_state_names[rdata->rp_state];
150 if (!cp) 170 if (!cp)
@@ -191,15 +211,14 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
191 211
192/** 212/**
193 * fc_rport_state_enter() - Change the rport's state 213 * fc_rport_state_enter() - Change the rport's state
194 * @rport: The rport whose state should change 214 * @rdata: The rport whose state should change
195 * @new: The new state of the rport 215 * @new: The new state of the rport
196 * 216 *
197 * Locking Note: Called with the rport lock held 217 * Locking Note: Called with the rport lock held
198 */ 218 */
199static void fc_rport_state_enter(struct fc_rport *rport, 219static void fc_rport_state_enter(struct fc_rport_priv *rdata,
200 enum fc_rport_state new) 220 enum fc_rport_state new)
201{ 221{
202 struct fc_rport_libfc_priv *rdata = rport->dd_data;
203 if (rdata->rp_state != new) 222 if (rdata->rp_state != new)
204 rdata->retries = 0; 223 rdata->retries = 0;
205 rdata->rp_state = new; 224 rdata->rp_state = new;
@@ -208,147 +227,187 @@ static void fc_rport_state_enter(struct fc_rport *rport,
208static void fc_rport_work(struct work_struct *work) 227static void fc_rport_work(struct work_struct *work)
209{ 228{
210 u32 port_id; 229 u32 port_id;
211 struct fc_rport_libfc_priv *rdata = 230 struct fc_rport_priv *rdata =
212 container_of(work, struct fc_rport_libfc_priv, event_work); 231 container_of(work, struct fc_rport_priv, event_work);
232 struct fc_rport_libfc_priv *rp;
213 enum fc_rport_event event; 233 enum fc_rport_event event;
214 enum fc_rport_trans_state trans_state;
215 struct fc_lport *lport = rdata->local_port; 234 struct fc_lport *lport = rdata->local_port;
216 struct fc_rport_operations *rport_ops; 235 struct fc_rport_operations *rport_ops;
217 struct fc_rport *rport = PRIV_TO_RPORT(rdata); 236 struct fc_rport_identifiers ids;
237 struct fc_rport *rport;
218 238
219 mutex_lock(&rdata->rp_mutex); 239 mutex_lock(&rdata->rp_mutex);
220 event = rdata->event; 240 event = rdata->event;
221 rport_ops = rdata->ops; 241 rport_ops = rdata->ops;
242 rport = rdata->rport;
222 243
223 if (event == RPORT_EV_CREATED) { 244 FC_RPORT_DBG(rdata, "work event %u\n", event);
224 struct fc_rport *new_rport;
225 struct fc_rport_libfc_priv *new_rdata;
226 struct fc_rport_identifiers ids;
227 245
228 ids.port_id = rport->port_id; 246 switch (event) {
229 ids.roles = rport->roles; 247 case RPORT_EV_READY:
230 ids.port_name = rport->port_name; 248 ids = rdata->ids;
231 ids.node_name = rport->node_name; 249 rdata->event = RPORT_EV_NONE;
250 kref_get(&rdata->kref);
251 mutex_unlock(&rdata->rp_mutex);
232 252
253 if (!rport)
254 rport = fc_remote_port_add(lport->host, 0, &ids);
255 if (!rport) {
256 FC_RPORT_DBG(rdata, "Failed to add the rport\n");
257 lport->tt.rport_logoff(rdata);
258 kref_put(&rdata->kref, lport->tt.rport_destroy);
259 return;
260 }
261 mutex_lock(&rdata->rp_mutex);
262 if (rdata->rport)
263 FC_RPORT_DBG(rdata, "rport already allocated\n");
264 rdata->rport = rport;
265 rport->maxframe_size = rdata->maxframe_size;
266 rport->supported_classes = rdata->supported_classes;
267
268 rp = rport->dd_data;
269 rp->local_port = lport;
270 rp->rp_state = rdata->rp_state;
271 rp->flags = rdata->flags;
272 rp->e_d_tov = rdata->e_d_tov;
273 rp->r_a_tov = rdata->r_a_tov;
233 mutex_unlock(&rdata->rp_mutex); 274 mutex_unlock(&rdata->rp_mutex);
234 275
235 new_rport = fc_remote_port_add(lport->host, 0, &ids); 276 if (rport_ops && rport_ops->event_callback) {
236 if (new_rport) { 277 FC_RPORT_DBG(rdata, "callback ev %d\n", event);
237 /* 278 rport_ops->event_callback(lport, rdata, event);
238 * Switch from the rogue rport to the rport
239 * returned by the FC class.
240 */
241 new_rport->maxframe_size = rport->maxframe_size;
242
243 new_rdata = new_rport->dd_data;
244 new_rdata->e_d_tov = rdata->e_d_tov;
245 new_rdata->r_a_tov = rdata->r_a_tov;
246 new_rdata->ops = rdata->ops;
247 new_rdata->local_port = rdata->local_port;
248 new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
249 new_rdata->trans_state = FC_PORTSTATE_REAL;
250 mutex_init(&new_rdata->rp_mutex);
251 INIT_DELAYED_WORK(&new_rdata->retry_work,
252 fc_rport_timeout);
253 INIT_LIST_HEAD(&new_rdata->peers);
254 INIT_WORK(&new_rdata->event_work, fc_rport_work);
255
256 fc_rport_state_enter(new_rport, RPORT_ST_READY);
257 } else {
258 printk(KERN_WARNING "libfc: Failed to allocate "
259 " memory for rport (%6x)\n", ids.port_id);
260 event = RPORT_EV_FAILED;
261 } 279 }
262 if (rport->port_id != FC_FID_DIR_SERV) 280 kref_put(&rdata->kref, lport->tt.rport_destroy);
263 if (rport_ops->event_callback) 281 break;
264 rport_ops->event_callback(lport, rport, 282
265 RPORT_EV_FAILED); 283 case RPORT_EV_FAILED:
266 put_device(&rport->dev); 284 case RPORT_EV_LOGO:
267 rport = new_rport; 285 case RPORT_EV_STOP:
268 rdata = new_rport->dd_data; 286 port_id = rdata->ids.port_id;
269 if (rport_ops->event_callback)
270 rport_ops->event_callback(lport, rport, event);
271 } else if ((event == RPORT_EV_FAILED) ||
272 (event == RPORT_EV_LOGO) ||
273 (event == RPORT_EV_STOP)) {
274 trans_state = rdata->trans_state;
275 mutex_unlock(&rdata->rp_mutex); 287 mutex_unlock(&rdata->rp_mutex);
276 if (rport_ops->event_callback) 288
277 rport_ops->event_callback(lport, rport, event); 289 if (port_id != FC_FID_DIR_SERV) {
278 if (trans_state == FC_PORTSTATE_ROGUE) 290 mutex_lock(&lport->disc.disc_mutex);
279 put_device(&rport->dev); 291 list_del(&rdata->peers);
280 else { 292 mutex_unlock(&lport->disc.disc_mutex);
281 port_id = rport->port_id; 293 }
294
295 if (rport_ops && rport_ops->event_callback) {
296 FC_RPORT_DBG(rdata, "callback ev %d\n", event);
297 rport_ops->event_callback(lport, rdata, event);
298 }
299 cancel_delayed_work_sync(&rdata->retry_work);
300
301 /*
302 * Reset any outstanding exchanges before freeing rport.
303 */
304 lport->tt.exch_mgr_reset(lport, 0, port_id);
305 lport->tt.exch_mgr_reset(lport, port_id, 0);
306
307 if (rport) {
308 rp = rport->dd_data;
309 rp->rp_state = RPORT_ST_DELETE;
310 mutex_lock(&rdata->rp_mutex);
311 rdata->rport = NULL;
312 mutex_unlock(&rdata->rp_mutex);
282 fc_remote_port_delete(rport); 313 fc_remote_port_delete(rport);
283 lport->tt.exch_mgr_reset(lport, 0, port_id);
284 lport->tt.exch_mgr_reset(lport, port_id, 0);
285 } 314 }
286 } else 315 kref_put(&rdata->kref, lport->tt.rport_destroy);
316 break;
317
318 default:
287 mutex_unlock(&rdata->rp_mutex); 319 mutex_unlock(&rdata->rp_mutex);
320 break;
321 }
288} 322}
289 323
290/** 324/**
291 * fc_rport_login() - Start the remote port login state machine 325 * fc_rport_login() - Start the remote port login state machine
292 * @rport: Fibre Channel remote port 326 * @rdata: private remote port
293 * 327 *
294 * Locking Note: Called without the rport lock held. This 328 * Locking Note: Called without the rport lock held. This
295 * function will hold the rport lock, call an _enter_* 329 * function will hold the rport lock, call an _enter_*
296 * function and then unlock the rport. 330 * function and then unlock the rport.
331 *
332 * This indicates the intent to be logged into the remote port.
333 * If it appears we are already logged in, ADISC is used to verify
334 * the setup.
297 */ 335 */
298int fc_rport_login(struct fc_rport *rport) 336int fc_rport_login(struct fc_rport_priv *rdata)
299{ 337{
300 struct fc_rport_libfc_priv *rdata = rport->dd_data;
301
302 mutex_lock(&rdata->rp_mutex); 338 mutex_lock(&rdata->rp_mutex);
303 339
304 FC_RPORT_DBG(rport, "Login to port\n"); 340 switch (rdata->rp_state) {
305 341 case RPORT_ST_READY:
306 fc_rport_enter_plogi(rport); 342 FC_RPORT_DBG(rdata, "ADISC port\n");
307 343 fc_rport_enter_adisc(rdata);
344 break;
345 default:
346 FC_RPORT_DBG(rdata, "Login to port\n");
347 fc_rport_enter_plogi(rdata);
348 break;
349 }
308 mutex_unlock(&rdata->rp_mutex); 350 mutex_unlock(&rdata->rp_mutex);
309 351
310 return 0; 352 return 0;
311} 353}
312 354
313/** 355/**
356 * fc_rport_enter_delete() - schedule a remote port to be deleted.
357 * @rdata: private remote port
358 * @event: event to report as the reason for deletion
359 *
360 * Locking Note: Called with the rport lock held.
361 *
362 * Allow state change into DELETE only once.
363 *
364 * Call queue_work only if there's no event already pending.
365 * Set the new event so that the old pending event will not occur.
366 * Since we have the mutex, even if fc_rport_work() is already started,
367 * it'll see the new event.
368 */
369static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
370 enum fc_rport_event event)
371{
372 if (rdata->rp_state == RPORT_ST_DELETE)
373 return;
374
375 FC_RPORT_DBG(rdata, "Delete port\n");
376
377 fc_rport_state_enter(rdata, RPORT_ST_DELETE);
378
379 if (rdata->event == RPORT_EV_NONE)
380 queue_work(rport_event_queue, &rdata->event_work);
381 rdata->event = event;
382}
383
384/**
314 * fc_rport_logoff() - Logoff and remove an rport 385 * fc_rport_logoff() - Logoff and remove an rport
315 * @rport: Fibre Channel remote port to be removed 386 * @rdata: private remote port
316 * 387 *
317 * Locking Note: Called without the rport lock held. This 388 * Locking Note: Called without the rport lock held. This
318 * function will hold the rport lock, call an _enter_* 389 * function will hold the rport lock, call an _enter_*
319 * function and then unlock the rport. 390 * function and then unlock the rport.
320 */ 391 */
321int fc_rport_logoff(struct fc_rport *rport) 392int fc_rport_logoff(struct fc_rport_priv *rdata)
322{ 393{
323 struct fc_rport_libfc_priv *rdata = rport->dd_data;
324
325 mutex_lock(&rdata->rp_mutex); 394 mutex_lock(&rdata->rp_mutex);
326 395
327 FC_RPORT_DBG(rport, "Remove port\n"); 396 FC_RPORT_DBG(rdata, "Remove port\n");
328 397
329 if (rdata->rp_state == RPORT_ST_NONE) { 398 if (rdata->rp_state == RPORT_ST_DELETE) {
330 FC_RPORT_DBG(rport, "Port in NONE state, not removing\n"); 399 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
331 mutex_unlock(&rdata->rp_mutex); 400 mutex_unlock(&rdata->rp_mutex);
332 goto out; 401 goto out;
333 } 402 }
334 403
335 fc_rport_enter_logo(rport); 404 fc_rport_enter_logo(rdata);
336 405
337 /* 406 /*
338 * Change the state to NONE so that we discard 407 * Change the state to Delete so that we discard
339 * the response. 408 * the response.
340 */ 409 */
341 fc_rport_state_enter(rport, RPORT_ST_NONE); 410 fc_rport_enter_delete(rdata, RPORT_EV_STOP);
342
343 mutex_unlock(&rdata->rp_mutex);
344
345 cancel_delayed_work_sync(&rdata->retry_work);
346
347 mutex_lock(&rdata->rp_mutex);
348
349 rdata->event = RPORT_EV_STOP;
350 queue_work(rport_event_queue, &rdata->event_work);
351
352 mutex_unlock(&rdata->rp_mutex); 411 mutex_unlock(&rdata->rp_mutex);
353 412
354out: 413out:
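
fc_rport_logoff() now funnels through fc_rport_enter_delete(), which queues the event work only when no event is pending and then overwrites the event field under the rport mutex, so the newest event wins instead of queueing twice. A toy model of that coalescing rule (assumed names; the counter stands in for queue_work()):

	#include <stdio.h>

	enum ev { EV_NONE, EV_READY, EV_STOP };

	static enum ev pending = EV_NONE;
	static int queued;	/* counts queue_work() calls */

	/* caller holds the rport mutex, as in fc_rport_enter_delete() */
	static void post_event(enum ev e)
	{
		if (pending == EV_NONE)
			queued++;	/* stands in for queue_work() */
		pending = e;		/* the newest event replaces older ones */
	}

	int main(void)
	{
		post_event(EV_READY);
		post_event(EV_STOP);	/* coalesces: no second queueing */
		printf("queued=%d pending=%d\n", queued, (int)pending);	/* 1 2 */
		return 0;
	}
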
@@ -357,26 +416,25 @@ out:
357 416
358/** 417/**
359 * fc_rport_enter_ready() - The rport is ready 418 * fc_rport_enter_ready() - The rport is ready
360 * @rport: Fibre Channel remote port that is ready 419 * @rdata: private remote port
361 * 420 *
362 * Locking Note: The rport lock is expected to be held before calling 421 * Locking Note: The rport lock is expected to be held before calling
363 * this routine. 422 * this routine.
364 */ 423 */
365static void fc_rport_enter_ready(struct fc_rport *rport) 424static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
366{ 425{
367 struct fc_rport_libfc_priv *rdata = rport->dd_data; 426 fc_rport_state_enter(rdata, RPORT_ST_READY);
368
369 fc_rport_state_enter(rport, RPORT_ST_READY);
370 427
371 FC_RPORT_DBG(rport, "Port is Ready\n"); 428 FC_RPORT_DBG(rdata, "Port is Ready\n");
372 429
373 rdata->event = RPORT_EV_CREATED; 430 if (rdata->event == RPORT_EV_NONE)
374 queue_work(rport_event_queue, &rdata->event_work); 431 queue_work(rport_event_queue, &rdata->event_work);
432 rdata->event = RPORT_EV_READY;
375} 433}
376 434
377/** 435/**
378 * fc_rport_timeout() - Handler for the retry_work timer. 436 * fc_rport_timeout() - Handler for the retry_work timer.
379 * @work: The work struct of the fc_rport_libfc_priv 437 * @work: The work struct of the fc_rport_priv
380 * 438 *
381 * Locking Note: Called without the rport lock held. This 439 * Locking Note: Called without the rport lock held. This
382 * function will hold the rport lock, call an _enter_* 440 * function will hold the rport lock, call an _enter_*
@@ -384,63 +442,63 @@ static void fc_rport_enter_ready(struct fc_rport *rport)
384 */ 442 */
385static void fc_rport_timeout(struct work_struct *work) 443static void fc_rport_timeout(struct work_struct *work)
386{ 444{
387 struct fc_rport_libfc_priv *rdata = 445 struct fc_rport_priv *rdata =
388 container_of(work, struct fc_rport_libfc_priv, retry_work.work); 446 container_of(work, struct fc_rport_priv, retry_work.work);
389 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
390 447
391 mutex_lock(&rdata->rp_mutex); 448 mutex_lock(&rdata->rp_mutex);
392 449
393 switch (rdata->rp_state) { 450 switch (rdata->rp_state) {
394 case RPORT_ST_PLOGI: 451 case RPORT_ST_PLOGI:
395 fc_rport_enter_plogi(rport); 452 fc_rport_enter_plogi(rdata);
396 break; 453 break;
397 case RPORT_ST_PRLI: 454 case RPORT_ST_PRLI:
398 fc_rport_enter_prli(rport); 455 fc_rport_enter_prli(rdata);
399 break; 456 break;
400 case RPORT_ST_RTV: 457 case RPORT_ST_RTV:
401 fc_rport_enter_rtv(rport); 458 fc_rport_enter_rtv(rdata);
402 break; 459 break;
403 case RPORT_ST_LOGO: 460 case RPORT_ST_LOGO:
404 fc_rport_enter_logo(rport); 461 fc_rport_enter_logo(rdata);
462 break;
463 case RPORT_ST_ADISC:
464 fc_rport_enter_adisc(rdata);
405 break; 465 break;
406 case RPORT_ST_READY: 466 case RPORT_ST_READY:
407 case RPORT_ST_INIT: 467 case RPORT_ST_INIT:
408 case RPORT_ST_NONE: 468 case RPORT_ST_DELETE:
409 break; 469 break;
410 } 470 }
411 471
412 mutex_unlock(&rdata->rp_mutex); 472 mutex_unlock(&rdata->rp_mutex);
413 put_device(&rport->dev);
414} 473}
415 474
416/** 475/**
417 * fc_rport_error() - Error handler, called once retries have been exhausted 476 * fc_rport_error() - Error handler, called once retries have been exhausted
418 * @rport: The fc_rport object 477 * @rdata: private remote port
419 * @fp: The frame pointer 478 * @fp: The frame pointer
420 * 479 *
421 * Locking Note: The rport lock is expected to be held before 480 * Locking Note: The rport lock is expected to be held before
422 * calling this routine 481 * calling this routine
423 */ 482 */
424static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) 483static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
425{ 484{
426 struct fc_rport_libfc_priv *rdata = rport->dd_data; 485 FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
427 486 IS_ERR(fp) ? -PTR_ERR(fp) : 0,
428 FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n", 487 fc_rport_state(rdata), rdata->retries);
429 PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
430 488
431 switch (rdata->rp_state) { 489 switch (rdata->rp_state) {
432 case RPORT_ST_PLOGI: 490 case RPORT_ST_PLOGI:
433 case RPORT_ST_PRLI:
434 case RPORT_ST_LOGO: 491 case RPORT_ST_LOGO:
435 rdata->event = RPORT_EV_FAILED; 492 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
436 fc_rport_state_enter(rport, RPORT_ST_NONE);
437 queue_work(rport_event_queue,
438 &rdata->event_work);
439 break; 493 break;
440 case RPORT_ST_RTV: 494 case RPORT_ST_RTV:
441 fc_rport_enter_ready(rport); 495 fc_rport_enter_ready(rdata);
442 break; 496 break;
443 case RPORT_ST_NONE: 497 case RPORT_ST_PRLI:
498 case RPORT_ST_ADISC:
499 fc_rport_enter_logo(rdata);
500 break;
501 case RPORT_ST_DELETE:
444 case RPORT_ST_READY: 502 case RPORT_ST_READY:
445 case RPORT_ST_INIT: 503 case RPORT_ST_INIT:
446 break; 504 break;
@@ -449,7 +507,7 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
449 507
450/** 508/**
451 * fc_rport_error_retry() - Error handler when retries are desired 509 * fc_rport_error_retry() - Error handler when retries are desired
452 * @rport: The fc_rport object 510 * @rdata: private remote port data
453 * @fp: The frame pointer 511 * @fp: The frame pointer
454 * 512 *
455 * If the error was an exchange timeout retry immediately, 513 * If the error was an exchange timeout retry immediately,
@@ -458,45 +516,43 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
458 * Locking Note: The rport lock is expected to be held before 516 * Locking Note: The rport lock is expected to be held before
459 * calling this routine 517 * calling this routine
460 */ 518 */
461static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) 519static void fc_rport_error_retry(struct fc_rport_priv *rdata,
520 struct fc_frame *fp)
462{ 521{
463 struct fc_rport_libfc_priv *rdata = rport->dd_data;
464 unsigned long delay = FC_DEF_E_D_TOV; 522 unsigned long delay = FC_DEF_E_D_TOV;
465 523
466 /* make sure this isn't an FC_EX_CLOSED error, never retry those */ 524 /* make sure this isn't an FC_EX_CLOSED error, never retry those */
467 if (PTR_ERR(fp) == -FC_EX_CLOSED) 525 if (PTR_ERR(fp) == -FC_EX_CLOSED)
468 return fc_rport_error(rport, fp); 526 return fc_rport_error(rdata, fp);
469 527
470 if (rdata->retries < rdata->local_port->max_rport_retry_count) { 528 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
471 FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n", 529 FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
472 PTR_ERR(fp), fc_rport_state(rport)); 530 PTR_ERR(fp), fc_rport_state(rdata));
473 rdata->retries++; 531 rdata->retries++;
474 /* no additional delay on exchange timeouts */ 532 /* no additional delay on exchange timeouts */
475 if (PTR_ERR(fp) == -FC_EX_TIMEOUT) 533 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
476 delay = 0; 534 delay = 0;
477 get_device(&rport->dev);
478 schedule_delayed_work(&rdata->retry_work, delay); 535 schedule_delayed_work(&rdata->retry_work, delay);
479 return; 536 return;
480 } 537 }
481 538
482 return fc_rport_error(rport, fp); 539 return fc_rport_error(rdata, fp);
483} 540}
484 541
485/** 542/**
486 * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response 543 * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response
487 * @sp: current sequence in the PLOGI exchange 544 * @sp: current sequence in the PLOGI exchange
488 * @fp: response frame 545 * @fp: response frame
489 * @rp_arg: Fibre Channel remote port 546 * @rdata_arg: private remote port data
490 * 547 *
491 * Locking Note: This function will be called without the rport lock 548 * Locking Note: This function will be called without the rport lock
492 * held, but it will lock, call an _enter_* function or fc_rport_error 549 * held, but it will lock, call an _enter_* function or fc_rport_error
493 * and then unlock the rport. 550 * and then unlock the rport.
494 */ 551 */
495static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, 552static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
496 void *rp_arg) 553 void *rdata_arg)
497{ 554{
498 struct fc_rport *rport = rp_arg; 555 struct fc_rport_priv *rdata = rdata_arg;
499 struct fc_rport_libfc_priv *rdata = rport->dd_data;
500 struct fc_lport *lport = rdata->local_port; 556 struct fc_lport *lport = rdata->local_port;
501 struct fc_els_flogi *plp = NULL; 557 struct fc_els_flogi *plp = NULL;
502 unsigned int tov; 558 unsigned int tov;
@@ -506,26 +562,26 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
506 562
507 mutex_lock(&rdata->rp_mutex); 563 mutex_lock(&rdata->rp_mutex);
508 564
509 FC_RPORT_DBG(rport, "Received a PLOGI response\n"); 565 FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
510 566
511 if (rdata->rp_state != RPORT_ST_PLOGI) { 567 if (rdata->rp_state != RPORT_ST_PLOGI) {
512 FC_RPORT_DBG(rport, "Received a PLOGI response, but in state " 568 FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
513 "%s\n", fc_rport_state(rport)); 569 "%s\n", fc_rport_state(rdata));
514 if (IS_ERR(fp)) 570 if (IS_ERR(fp))
515 goto err; 571 goto err;
516 goto out; 572 goto out;
517 } 573 }
518 574
519 if (IS_ERR(fp)) { 575 if (IS_ERR(fp)) {
520 fc_rport_error_retry(rport, fp); 576 fc_rport_error_retry(rdata, fp);
521 goto err; 577 goto err;
522 } 578 }
523 579
524 op = fc_frame_payload_op(fp); 580 op = fc_frame_payload_op(fp);
525 if (op == ELS_LS_ACC && 581 if (op == ELS_LS_ACC &&
526 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) { 582 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
527 rport->port_name = get_unaligned_be64(&plp->fl_wwpn); 583 rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
528 rport->node_name = get_unaligned_be64(&plp->fl_wwnn); 584 rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
529 585
530 tov = ntohl(plp->fl_csp.sp_e_d_tov); 586 tov = ntohl(plp->fl_csp.sp_e_d_tov);
531 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) 587 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
@@ -537,75 +593,64 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
537 if (cssp_seq < csp_seq) 593 if (cssp_seq < csp_seq)
538 csp_seq = cssp_seq; 594 csp_seq = cssp_seq;
539 rdata->max_seq = csp_seq; 595 rdata->max_seq = csp_seq;
540 rport->maxframe_size = 596 rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
541 fc_plogi_get_maxframe(plp, lport->mfs); 597 fc_rport_enter_prli(rdata);
542
543 /*
544 * If the rport is one of the well known addresses
545 * we skip PRLI and RTV and go straight to READY.
546 */
547 if (rport->port_id >= FC_FID_DOM_MGR)
548 fc_rport_enter_ready(rport);
549 else
550 fc_rport_enter_prli(rport);
551 } else 598 } else
552 fc_rport_error_retry(rport, fp); 599 fc_rport_error_retry(rdata, fp);
553 600
554out: 601out:
555 fc_frame_free(fp); 602 fc_frame_free(fp);
556err: 603err:
557 mutex_unlock(&rdata->rp_mutex); 604 mutex_unlock(&rdata->rp_mutex);
558 put_device(&rport->dev); 605 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
559} 606}
560 607
561/** 608/**
562 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer 609 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
563 * @rport: Fibre Channel remote port to send PLOGI to 610 * @rdata: private remote port data
564 * 611 *
565 * Locking Note: The rport lock is expected to be held before calling 612 * Locking Note: The rport lock is expected to be held before calling
566 * this routine. 613 * this routine.
567 */ 614 */
568static void fc_rport_enter_plogi(struct fc_rport *rport) 615static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
569{ 616{
570 struct fc_rport_libfc_priv *rdata = rport->dd_data;
571 struct fc_lport *lport = rdata->local_port; 617 struct fc_lport *lport = rdata->local_port;
572 struct fc_frame *fp; 618 struct fc_frame *fp;
573 619
574 FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n", 620 FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
575 fc_rport_state(rport)); 621 fc_rport_state(rdata));
576 622
577 fc_rport_state_enter(rport, RPORT_ST_PLOGI); 623 fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
578 624
579 rport->maxframe_size = FC_MIN_MAX_PAYLOAD; 625 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
580 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); 626 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
581 if (!fp) { 627 if (!fp) {
582 fc_rport_error_retry(rport, fp); 628 fc_rport_error_retry(rdata, fp);
583 return; 629 return;
584 } 630 }
585 rdata->e_d_tov = lport->e_d_tov; 631 rdata->e_d_tov = lport->e_d_tov;
586 632
587 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI, 633 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
588 fc_rport_plogi_resp, rport, lport->e_d_tov)) 634 fc_rport_plogi_resp, rdata, lport->e_d_tov))
589 fc_rport_error_retry(rport, fp); 635 fc_rport_error_retry(rdata, fp);
590 else 636 else
591 get_device(&rport->dev); 637 kref_get(&rdata->kref);
592} 638}
593 639
594/** 640/**
595 * fc_rport_prli_resp() - Process Login (PRLI) response handler 641 * fc_rport_prli_resp() - Process Login (PRLI) response handler
596 * @sp: current sequence in the PRLI exchange 642 * @sp: current sequence in the PRLI exchange
597 * @fp: response frame 643 * @fp: response frame
598 * @rp_arg: Fibre Channel remote port 644 * @rdata_arg: private remote port data
599 * 645 *
600 * Locking Note: This function will be called without the rport lock 646 * Locking Note: This function will be called without the rport lock
601 * held, but it will lock, call an _enter_* function or fc_rport_error 647 * held, but it will lock, call an _enter_* function or fc_rport_error
602 * and then unlock the rport. 648 * and then unlock the rport.
603 */ 649 */
604static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, 650static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
605 void *rp_arg) 651 void *rdata_arg)
606{ 652{
607 struct fc_rport *rport = rp_arg; 653 struct fc_rport_priv *rdata = rdata_arg;
608 struct fc_rport_libfc_priv *rdata = rport->dd_data;
609 struct { 654 struct {
610 struct fc_els_prli prli; 655 struct fc_els_prli prli;
611 struct fc_els_spp spp; 656 struct fc_els_spp spp;
@@ -616,21 +661,24 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
616 661
617 mutex_lock(&rdata->rp_mutex); 662 mutex_lock(&rdata->rp_mutex);
618 663
619 FC_RPORT_DBG(rport, "Received a PRLI response\n"); 664 FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
620 665
621 if (rdata->rp_state != RPORT_ST_PRLI) { 666 if (rdata->rp_state != RPORT_ST_PRLI) {
622 FC_RPORT_DBG(rport, "Received a PRLI response, but in state " 667 FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
623 "%s\n", fc_rport_state(rport)); 668 "%s\n", fc_rport_state(rdata));
624 if (IS_ERR(fp)) 669 if (IS_ERR(fp))
625 goto err; 670 goto err;
626 goto out; 671 goto out;
627 } 672 }
628 673
629 if (IS_ERR(fp)) { 674 if (IS_ERR(fp)) {
630 fc_rport_error_retry(rport, fp); 675 fc_rport_error_retry(rdata, fp);
631 goto err; 676 goto err;
632 } 677 }
633 678
679 /* reinitialize remote port roles */
680 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
681
634 op = fc_frame_payload_op(fp); 682 op = fc_frame_payload_op(fp);
635 if (op == ELS_LS_ACC) { 683 if (op == ELS_LS_ACC) {
636 pp = fc_frame_payload_get(fp, sizeof(*pp)); 684 pp = fc_frame_payload_get(fp, sizeof(*pp));
@@ -640,90 +688,82 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
640 rdata->flags |= FC_RP_FLAGS_RETRY; 688 rdata->flags |= FC_RP_FLAGS_RETRY;
641 } 689 }
642 690
643 rport->supported_classes = FC_COS_CLASS3; 691 rdata->supported_classes = FC_COS_CLASS3;
644 if (fcp_parm & FCP_SPPF_INIT_FCN) 692 if (fcp_parm & FCP_SPPF_INIT_FCN)
645 roles |= FC_RPORT_ROLE_FCP_INITIATOR; 693 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
646 if (fcp_parm & FCP_SPPF_TARG_FCN) 694 if (fcp_parm & FCP_SPPF_TARG_FCN)
647 roles |= FC_RPORT_ROLE_FCP_TARGET; 695 roles |= FC_RPORT_ROLE_FCP_TARGET;
648 696
649 rport->roles = roles; 697 rdata->ids.roles = roles;
650 fc_rport_enter_rtv(rport); 698 fc_rport_enter_rtv(rdata);
651 699
652 } else { 700 } else {
653 FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n"); 701 FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
654 rdata->event = RPORT_EV_FAILED; 702 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
655 fc_rport_state_enter(rport, RPORT_ST_NONE);
656 queue_work(rport_event_queue, &rdata->event_work);
657 } 703 }
658 704
659out: 705out:
660 fc_frame_free(fp); 706 fc_frame_free(fp);
661err: 707err:
662 mutex_unlock(&rdata->rp_mutex); 708 mutex_unlock(&rdata->rp_mutex);
663 put_device(&rport->dev); 709 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
664} 710}
665 711
666/** 712/**
667 * fc_rport_logo_resp() - Logout (LOGO) response handler 713 * fc_rport_logo_resp() - Logout (LOGO) response handler
668 * @sp: current sequence in the LOGO exchange 714 * @sp: current sequence in the LOGO exchange
669 * @fp: response frame 715 * @fp: response frame
670 * @rp_arg: Fibre Channel remote port 716 * @rdata_arg: private remote port data
671 * 717 *
672 * Locking Note: This function will be called without the rport lock 718 * Locking Note: This function will be called without the rport lock
673 * held, but it will lock, call an _enter_* function or fc_rport_error 719 * held, but it will lock, call an _enter_* function or fc_rport_error
674 * and then unlock the rport. 720 * and then unlock the rport.
675 */ 721 */
676static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, 722static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
677 void *rp_arg) 723 void *rdata_arg)
678{ 724{
679 struct fc_rport *rport = rp_arg; 725 struct fc_rport_priv *rdata = rdata_arg;
680 struct fc_rport_libfc_priv *rdata = rport->dd_data;
681 u8 op; 726 u8 op;
682 727
683 mutex_lock(&rdata->rp_mutex); 728 mutex_lock(&rdata->rp_mutex);
684 729
685 FC_RPORT_DBG(rport, "Received a LOGO response\n"); 730 FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));
686 731
687 if (rdata->rp_state != RPORT_ST_LOGO) { 732 if (rdata->rp_state != RPORT_ST_LOGO) {
688 FC_RPORT_DBG(rport, "Received a LOGO response, but in state " 733 FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
689 "%s\n", fc_rport_state(rport)); 734 "%s\n", fc_rport_state(rdata));
690 if (IS_ERR(fp)) 735 if (IS_ERR(fp))
691 goto err; 736 goto err;
692 goto out; 737 goto out;
693 } 738 }
694 739
695 if (IS_ERR(fp)) { 740 if (IS_ERR(fp)) {
696 fc_rport_error_retry(rport, fp); 741 fc_rport_error_retry(rdata, fp);
697 goto err; 742 goto err;
698 } 743 }
699 744
700 op = fc_frame_payload_op(fp); 745 op = fc_frame_payload_op(fp);
701 if (op == ELS_LS_ACC) { 746 if (op != ELS_LS_ACC)
702 fc_rport_enter_rtv(rport); 747 FC_RPORT_DBG(rdata, "Bad ELS response op %x for LOGO command\n",
703 } else { 748 op);
704 FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n"); 749 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
705 rdata->event = RPORT_EV_LOGO;
706 fc_rport_state_enter(rport, RPORT_ST_NONE);
707 queue_work(rport_event_queue, &rdata->event_work);
708 }
709 750
710out: 751out:
711 fc_frame_free(fp); 752 fc_frame_free(fp);
712err: 753err:
713 mutex_unlock(&rdata->rp_mutex); 754 mutex_unlock(&rdata->rp_mutex);
714 put_device(&rport->dev); 755 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
715} 756}
716 757
717/** 758/**
718 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer 759 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
719 * @rport: Fibre Channel remote port to send PRLI to 760 * @rdata: private remote port data
720 * 761 *
721 * Locking Note: The rport lock is expected to be held before calling 762 * Locking Note: The rport lock is expected to be held before calling
722 * this routine. 763 * this routine.
723 */ 764 */
724static void fc_rport_enter_prli(struct fc_rport *rport) 765static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
725{ 766{
726 struct fc_rport_libfc_priv *rdata = rport->dd_data;
727 struct fc_lport *lport = rdata->local_port; 767 struct fc_lport *lport = rdata->local_port;
728 struct { 768 struct {
729 struct fc_els_prli prli; 769 struct fc_els_prli prli;
@@ -731,29 +771,38 @@ static void fc_rport_enter_prli(struct fc_rport *rport)
731 } *pp; 771 } *pp;
732 struct fc_frame *fp; 772 struct fc_frame *fp;
733 773
734 FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n", 774 /*
735 fc_rport_state(rport)); 775 * If the rport is one of the well known addresses
776 * we skip PRLI and RTV and go straight to READY.
777 */
778 if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
779 fc_rport_enter_ready(rdata);
780 return;
781 }
782
783 FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
784 fc_rport_state(rdata));
736 785
737 fc_rport_state_enter(rport, RPORT_ST_PRLI); 786 fc_rport_state_enter(rdata, RPORT_ST_PRLI);
738 787
739 fp = fc_frame_alloc(lport, sizeof(*pp)); 788 fp = fc_frame_alloc(lport, sizeof(*pp));
740 if (!fp) { 789 if (!fp) {
741 fc_rport_error_retry(rport, fp); 790 fc_rport_error_retry(rdata, fp);
742 return; 791 return;
743 } 792 }
744 793
745 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI, 794 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
746 fc_rport_prli_resp, rport, lport->e_d_tov)) 795 fc_rport_prli_resp, rdata, lport->e_d_tov))
747 fc_rport_error_retry(rport, fp); 796 fc_rport_error_retry(rdata, fp);
748 else 797 else
749 get_device(&rport->dev); 798 kref_get(&rdata->kref);
750} 799}
751 800
752/** 801/**
753 * fc_rport_rtv_resp() - Request Timeout Value response handler 802 * fc_rport_rtv_resp() - Request Timeout Value response handler
754 * @sp: current sequence in the RTV exchange 803 * @sp: current sequence in the RTV exchange
755 * @fp: response frame 804 * @fp: response frame
756 * @rp_arg: Fibre Channel remote port 805 * @rdata_arg: private remote port data
757 * 806 *
758 * Many targets don't seem to support this. 807 * Many targets don't seem to support this.
759 * 808 *
@@ -762,26 +811,25 @@ static void fc_rport_enter_prli(struct fc_rport *rport)
762 * and then unlock the rport. 811 * and then unlock the rport.
763 */ 812 */
764static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, 813static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
765 void *rp_arg) 814 void *rdata_arg)
766{ 815{
767 struct fc_rport *rport = rp_arg; 816 struct fc_rport_priv *rdata = rdata_arg;
768 struct fc_rport_libfc_priv *rdata = rport->dd_data;
769 u8 op; 817 u8 op;
770 818
771 mutex_lock(&rdata->rp_mutex); 819 mutex_lock(&rdata->rp_mutex);
772 820
773 FC_RPORT_DBG(rport, "Received an RTV response\n"); 821 FC_RPORT_DBG(rdata, "Received an RTV %s\n", fc_els_resp_type(fp));
774 822
775 if (rdata->rp_state != RPORT_ST_RTV) { 823 if (rdata->rp_state != RPORT_ST_RTV) {
776 FC_RPORT_DBG(rport, "Received an RTV response, but in state " 824 FC_RPORT_DBG(rdata, "Received an RTV response, but in state "
777 "%s\n", fc_rport_state(rport)); 825 "%s\n", fc_rport_state(rdata));
778 if (IS_ERR(fp)) 826 if (IS_ERR(fp))
779 goto err; 827 goto err;
780 goto out; 828 goto out;
781 } 829 }
782 830
783 if (IS_ERR(fp)) { 831 if (IS_ERR(fp)) {
784 fc_rport_error(rport, fp); 832 fc_rport_error(rdata, fp);
785 goto err; 833 goto err;
786 } 834 }
787 835
@@ -807,184 +855,376 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
807 } 855 }
808 } 856 }
809 857
810 fc_rport_enter_ready(rport); 858 fc_rport_enter_ready(rdata);
811 859
812out: 860out:
813 fc_frame_free(fp); 861 fc_frame_free(fp);
814err: 862err:
815 mutex_unlock(&rdata->rp_mutex); 863 mutex_unlock(&rdata->rp_mutex);
816 put_device(&rport->dev); 864 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
817} 865}
818 866
819/** 867/**
820 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer 868 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
821 * @rport: Fibre Channel remote port to send RTV to 869 * @rdata: private remote port data
822 * 870 *
823 * Locking Note: The rport lock is expected to be held before calling 871 * Locking Note: The rport lock is expected to be held before calling
824 * this routine. 872 * this routine.
825 */ 873 */
826static void fc_rport_enter_rtv(struct fc_rport *rport) 874static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
827{ 875{
828 struct fc_frame *fp; 876 struct fc_frame *fp;
829 struct fc_rport_libfc_priv *rdata = rport->dd_data;
830 struct fc_lport *lport = rdata->local_port; 877 struct fc_lport *lport = rdata->local_port;
831 878
832 FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n", 879 FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
833 fc_rport_state(rport)); 880 fc_rport_state(rdata));
834 881
835 fc_rport_state_enter(rport, RPORT_ST_RTV); 882 fc_rport_state_enter(rdata, RPORT_ST_RTV);
836 883
837 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); 884 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
838 if (!fp) { 885 if (!fp) {
839 fc_rport_error_retry(rport, fp); 886 fc_rport_error_retry(rdata, fp);
840 return; 887 return;
841 } 888 }
842 889
843 if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV, 890 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
844 fc_rport_rtv_resp, rport, lport->e_d_tov)) 891 fc_rport_rtv_resp, rdata, lport->e_d_tov))
845 fc_rport_error_retry(rport, fp); 892 fc_rport_error_retry(rdata, fp);
846 else 893 else
847 get_device(&rport->dev); 894 kref_get(&rdata->kref);
848} 895}
849 896
850/** 897/**
851 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer 898 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
852 * @rport: Fibre Channel remote port to send LOGO to 899 * @rdata: private remote port data
853 * 900 *
854 * Locking Note: The rport lock is expected to be held before calling 901 * Locking Note: The rport lock is expected to be held before calling
855 * this routine. 902 * this routine.
856 */ 903 */
857static void fc_rport_enter_logo(struct fc_rport *rport) 904static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
858{ 905{
859 struct fc_rport_libfc_priv *rdata = rport->dd_data;
860 struct fc_lport *lport = rdata->local_port; 906 struct fc_lport *lport = rdata->local_port;
861 struct fc_frame *fp; 907 struct fc_frame *fp;
862 908
863 FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n", 909 FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
864 fc_rport_state(rport)); 910 fc_rport_state(rdata));
865 911
866 fc_rport_state_enter(rport, RPORT_ST_LOGO); 912 fc_rport_state_enter(rdata, RPORT_ST_LOGO);
867 913
868 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); 914 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
869 if (!fp) { 915 if (!fp) {
870 fc_rport_error_retry(rport, fp); 916 fc_rport_error_retry(rdata, fp);
871 return; 917 return;
872 } 918 }
873 919
874 if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO, 920 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
875 fc_rport_logo_resp, rport, lport->e_d_tov)) 921 fc_rport_logo_resp, rdata, lport->e_d_tov))
876 fc_rport_error_retry(rport, fp); 922 fc_rport_error_retry(rdata, fp);
877 else 923 else
878 get_device(&rport->dev); 924 kref_get(&rdata->kref);
879} 925}
880 926
881
882/** 927/**
883 * fc_rport_recv_req() - Receive a request from a rport 928 * fc_rport_adisc_resp() - Address Discovery response handler
884 * @sp: current sequence in the PLOGI exchange 929 * @sp: current sequence in the ADISC exchange
885 * @fp: response frame 930 * @fp: response frame
886 * @rp_arg: Fibre Channel remote port 931 * @rdata_arg: remote port private.
887 * 932 *
888 * Locking Note: Called without the rport lock held. This 933 * Locking Note: This function will be called without the rport lock
889 * function will hold the rport lock, call an _enter_* 934 * held, but it will lock, call an _enter_* function or fc_rport_error
890 * function and then unlock the rport. 935 * and then unlock the rport.
891 */ 936 */
892void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, 937static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
893 struct fc_rport *rport) 938 void *rdata_arg)
939{
940 struct fc_rport_priv *rdata = rdata_arg;
941 struct fc_els_adisc *adisc;
942 u8 op;
943
944 mutex_lock(&rdata->rp_mutex);
945
946 FC_RPORT_DBG(rdata, "Received an ADISC response\n");
947
948 if (rdata->rp_state != RPORT_ST_ADISC) {
949 FC_RPORT_DBG(rdata, "Received an ADISC resp but in state %s\n",
950 fc_rport_state(rdata));
951 if (IS_ERR(fp))
952 goto err;
953 goto out;
954 }
955
956 if (IS_ERR(fp)) {
957 fc_rport_error(rdata, fp);
958 goto err;
959 }
960
961 /*
962 * If address verification failed, consider us logged out of the rport.
963 * Since the rport is still in discovery, we want to be
964 * logged in, so go to PLOGI state. Otherwise, go back to READY.
965 */
966 op = fc_frame_payload_op(fp);
967 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
968 if (op != ELS_LS_ACC || !adisc ||
969 ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
970 get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
971 get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
972 FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
973 fc_rport_enter_plogi(rdata);
974 } else {
975 FC_RPORT_DBG(rdata, "ADISC OK\n");
976 fc_rport_enter_ready(rdata);
977 }
978out:
979 fc_frame_free(fp);
980err:
981 mutex_unlock(&rdata->rp_mutex);
982 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
983}
984
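The mismatch test above is the heart of ADISC: the LS_ACC must carry
the same port ID, WWPN and WWNN that were cached at login, or the old
session is treated as stale. A compilable sketch of the comparison
(field names abbreviated; these are not the libfc structures):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* cached remote-port identifiers (abbreviated, illustrative) */
    struct ids {
            uint32_t port_id;           /* 24-bit FC address */
            uint64_t wwpn;              /* port name */
            uint64_t wwnn;              /* node name */
    };

    /* LS_ACC plus all three IDs matching keeps the login; any
     * mismatch sends the state machine back to PLOGI */
    static bool adisc_ok(bool ls_acc, const struct ids *c,
                         uint32_t port_id, uint64_t wwpn, uint64_t wwnn)
    {
            return ls_acc && port_id == c->port_id &&
                   wwpn == c->wwpn && wwnn == c->wwnn;
    }

    int main(void)
    {
            struct ids cached = { 0x010203, 0x5005076801234567ull,
                                  0x5005076801234568ull };

            printf("%d\n", adisc_ok(true, &cached, 0x010203,
                                    cached.wwpn, cached.wwnn)); /* 1: READY */
            printf("%d\n", adisc_ok(true, &cached, 0x010204,
                                    cached.wwpn, cached.wwnn)); /* 0: re-PLOGI */
            return 0;
    }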
985/**
986 * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer
987 * @rdata: remote port private data
988 *
989 * Locking Note: The rport lock is expected to be held before calling
990 * this routine.
991 */
992static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
894{ 993{
895 struct fc_rport_libfc_priv *rdata = rport->dd_data;
896 struct fc_lport *lport = rdata->local_port; 994 struct fc_lport *lport = rdata->local_port;
995 struct fc_frame *fp;
996
997 FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
998 fc_rport_state(rdata));
897 999
1000 fc_rport_state_enter(rdata, RPORT_ST_ADISC);
1001
1002 fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
1003 if (!fp) {
1004 fc_rport_error_retry(rdata, fp);
1005 return;
1006 }
1007 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
1008 fc_rport_adisc_resp, rdata, lport->e_d_tov))
1009 fc_rport_error_retry(rdata, fp);
1010 else
1011 kref_get(&rdata->kref);
1012}
1013
1014/**
1015 * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request
1016 * @rdata: remote port private
1017 * @sp: current sequence in the ADISC exchange
1018 * @in_fp: ADISC request frame
1019 *
1020 * Locking Note: Called with the lport and rport locks held.
1021 */
1022static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
1023 struct fc_seq *sp, struct fc_frame *in_fp)
1024{
1025 struct fc_lport *lport = rdata->local_port;
1026 struct fc_frame *fp;
1027 struct fc_exch *ep = fc_seq_exch(sp);
1028 struct fc_els_adisc *adisc;
1029 struct fc_seq_els_data rjt_data;
1030 u32 f_ctl;
1031
1032 FC_RPORT_DBG(rdata, "Received ADISC request\n");
1033
1034 adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
1035 if (!adisc) {
1036 rjt_data.fp = NULL;
1037 rjt_data.reason = ELS_RJT_PROT;
1038 rjt_data.explan = ELS_EXPL_INV_LEN;
1039 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1040 goto drop;
1041 }
1042
1043 fp = fc_frame_alloc(lport, sizeof(*adisc));
1044 if (!fp)
1045 goto drop;
1046 fc_adisc_fill(lport, fp);
1047 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
1048 adisc->adisc_cmd = ELS_LS_ACC;
1049 sp = lport->tt.seq_start_next(sp);
1050 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1051 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1052 FC_TYPE_ELS, f_ctl, 0);
1053 lport->tt.seq_send(lport, sp, fp);
1054drop:
1055 fc_frame_free(in_fp);
1056}
1057
1058/**
1059 * fc_rport_recv_els_req() - handle a validated ELS request.
1060 * @lport: Fibre Channel local port
1061 * @sp: current sequence in the PLOGI exchange
1062 * @fp: response frame
1063 *
1064 * Handle incoming ELS requests that require port login.
1065 * The ELS opcode has already been validated by the caller.
1066 *
1067 * Locking Note: Called with the lport lock held.
1068 */
1069static void fc_rport_recv_els_req(struct fc_lport *lport,
1070 struct fc_seq *sp, struct fc_frame *fp)
1071{
1072 struct fc_rport_priv *rdata;
898 struct fc_frame_header *fh; 1073 struct fc_frame_header *fh;
899 struct fc_seq_els_data els_data; 1074 struct fc_seq_els_data els_data;
900 u8 op;
901
902 mutex_lock(&rdata->rp_mutex);
903 1075
904 els_data.fp = NULL; 1076 els_data.fp = NULL;
905 els_data.explan = ELS_EXPL_NONE; 1077 els_data.reason = ELS_RJT_UNAB;
906 els_data.reason = ELS_RJT_NONE; 1078 els_data.explan = ELS_EXPL_PLOGI_REQD;
907 1079
908 fh = fc_frame_header_get(fp); 1080 fh = fc_frame_header_get(fp);
909 1081
910 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) { 1082 mutex_lock(&lport->disc.disc_mutex);
911 op = fc_frame_payload_op(fp); 1083 rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id));
912 switch (op) { 1084 if (!rdata) {
913 case ELS_PLOGI: 1085 mutex_unlock(&lport->disc.disc_mutex);
914 fc_rport_recv_plogi_req(rport, sp, fp); 1086 goto reject;
915 break; 1087 }
916 case ELS_PRLI: 1088 mutex_lock(&rdata->rp_mutex);
917 fc_rport_recv_prli_req(rport, sp, fp); 1089 mutex_unlock(&lport->disc.disc_mutex);
918 break; 1090
919 case ELS_PRLO: 1091 switch (rdata->rp_state) {
920 fc_rport_recv_prlo_req(rport, sp, fp); 1092 case RPORT_ST_PRLI:
921 break; 1093 case RPORT_ST_RTV:
922 case ELS_LOGO: 1094 case RPORT_ST_READY:
923 fc_rport_recv_logo_req(rport, sp, fp); 1095 case RPORT_ST_ADISC:
924 break; 1096 break;
925 case ELS_RRQ: 1097 default:
926 els_data.fp = fp; 1098 mutex_unlock(&rdata->rp_mutex);
927 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data); 1099 goto reject;
928 break; 1100 }
929 case ELS_REC: 1101
930 els_data.fp = fp; 1102 switch (fc_frame_payload_op(fp)) {
931 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); 1103 case ELS_PRLI:
932 break; 1104 fc_rport_recv_prli_req(rdata, sp, fp);
933 default: 1105 break;
934 els_data.reason = ELS_RJT_UNSUP; 1106 case ELS_PRLO:
935 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); 1107 fc_rport_recv_prlo_req(rdata, sp, fp);
936 break; 1108 break;
937 } 1109 case ELS_ADISC:
1110 fc_rport_recv_adisc_req(rdata, sp, fp);
1111 break;
1112 case ELS_RRQ:
1113 els_data.fp = fp;
1114 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
1115 break;
1116 case ELS_REC:
1117 els_data.fp = fp;
1118 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
1119 break;
1120 default:
1121 fc_frame_free(fp); /* can't happen */
1122 break;
938 } 1123 }
939 1124
940 mutex_unlock(&rdata->rp_mutex); 1125 mutex_unlock(&rdata->rp_mutex);
1126 return;
1127
1128reject:
1129 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
1130 fc_frame_free(fp);
1131}
1132
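fc_rport_recv_els_req() services PRLI, PRLO, ADISC, RRQ and REC only
in states where port login has completed; anything else is answered
with LS_RJT "unable, PLOGI required". A standalone model of that gate
(state names shortened from the libfc enum):

    #include <stdbool.h>
    #include <stdio.h>

    enum rp_state { ST_INIT, ST_PLOGI, ST_PRLI, ST_RTV, ST_READY,
                    ST_LOGO, ST_ADISC, ST_DELETE };

    /* requests that assume a completed PLOGI are serviced only in
     * the states that follow it */
    static bool login_complete(enum rp_state st)
    {
            switch (st) {
            case ST_PRLI:
            case ST_RTV:
            case ST_READY:
            case ST_ADISC:
                    return true;
            default:
                    return false;
            }
    }

    int main(void)
    {
            printf("%d %d\n", login_complete(ST_READY),
                   login_complete(ST_PLOGI));  /* prints "1 0" */
            return 0;
    }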
1133/**
1134 * fc_rport_recv_req() - Handle a received ELS request from a rport
1135 * @sp: current sequence in the ELS exchange
1136 * @fp: ELS request frame
1137 * @lport: Fibre Channel local port
1138 *
1139 * Locking Note: Called with the lport lock held.
1140 */
1141void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
1142 struct fc_lport *lport)
1143{
1144 struct fc_seq_els_data els_data;
1145
1146 /*
1147 * Handle PLOGI and LOGO requests separately, since they
1148 * don't require prior login.
1149 * Check for unsupported opcodes first and reject them.
1150 * For some ops, it would be incorrect to reject with "PLOGI required".
1151 */
1152 switch (fc_frame_payload_op(fp)) {
1153 case ELS_PLOGI:
1154 fc_rport_recv_plogi_req(lport, sp, fp);
1155 break;
1156 case ELS_LOGO:
1157 fc_rport_recv_logo_req(lport, sp, fp);
1158 break;
1159 case ELS_PRLI:
1160 case ELS_PRLO:
1161 case ELS_ADISC:
1162 case ELS_RRQ:
1163 case ELS_REC:
1164 fc_rport_recv_els_req(lport, sp, fp);
1165 break;
1166 default:
1167 fc_frame_free(fp);
1168 els_data.fp = NULL;
1169 els_data.reason = ELS_RJT_UNSUP;
1170 els_data.explan = ELS_EXPL_NONE;
1171 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
1172 break;
1173 }
941} 1174}
942 1175
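The dispatcher above splits traffic three ways: PLOGI and LOGO need no
prior login, five ops ride the logged-in path, and the rest are
rejected as unsupported. A standalone mirror of that triage (the
numeric codes are the standard FC-LS command values, added for the
sketch):

    #include <stdio.h>

    enum verdict { NO_LOGIN_NEEDED, NEEDS_LOGIN, REJECT_UNSUP };

    static enum verdict triage(unsigned char op)
    {
            switch (op) {
            case 0x03:                  /* ELS_PLOGI */
            case 0x05:                  /* ELS_LOGO */
                    return NO_LOGIN_NEEDED;
            case 0x20:                  /* ELS_PRLI */
            case 0x21:                  /* ELS_PRLO */
            case 0x52:                  /* ELS_ADISC */
            case 0x12:                  /* ELS_RRQ */
            case 0x13:                  /* ELS_REC */
                    return NEEDS_LOGIN;
            default:
                    return REJECT_UNSUP;
            }
    }

    int main(void)
    {
            printf("%d %d %d\n", triage(0x03), triage(0x20),
                   triage(0x04));       /* prints "0 1 2" */
            return 0;
    }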
943/** 1176/**
944 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request 1177 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
945 * @rport: Fibre Channel remote port that initiated PLOGI 1178 * @lport: local port
946 * @sp: current sequence in the PLOGI exchange 1179 * @sp: current sequence in the PLOGI exchange
947 * @fp: PLOGI request frame 1180 * @fp: PLOGI request frame
948 * 1181 *
949 * Locking Note: The rport lock is exected to be held before calling 1182 * Locking Note: The rport lock is held before calling this function.
950 * this function.
951 */ 1183 */
952static void fc_rport_recv_plogi_req(struct fc_rport *rport, 1184static void fc_rport_recv_plogi_req(struct fc_lport *lport,
953 struct fc_seq *sp, struct fc_frame *rx_fp) 1185 struct fc_seq *sp, struct fc_frame *rx_fp)
954{ 1186{
955 struct fc_rport_libfc_priv *rdata = rport->dd_data; 1187 struct fc_disc *disc;
956 struct fc_lport *lport = rdata->local_port; 1188 struct fc_rport_priv *rdata;
957 struct fc_frame *fp = rx_fp; 1189 struct fc_frame *fp = rx_fp;
958 struct fc_exch *ep; 1190 struct fc_exch *ep;
959 struct fc_frame_header *fh; 1191 struct fc_frame_header *fh;
960 struct fc_els_flogi *pl; 1192 struct fc_els_flogi *pl;
961 struct fc_seq_els_data rjt_data; 1193 struct fc_seq_els_data rjt_data;
962 u32 sid; 1194 u32 sid, f_ctl;
963 u64 wwpn;
964 u64 wwnn;
965 enum fc_els_rjt_reason reject = 0;
966 u32 f_ctl;
967 rjt_data.fp = NULL;
968 1195
1196 rjt_data.fp = NULL;
969 fh = fc_frame_header_get(fp); 1197 fh = fc_frame_header_get(fp);
1198 sid = ntoh24(fh->fh_s_id);
970 1199
971 FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n", 1200 FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
972 fc_rport_state(rport));
973 1201
974 sid = ntoh24(fh->fh_s_id);
975 pl = fc_frame_payload_get(fp, sizeof(*pl)); 1202 pl = fc_frame_payload_get(fp, sizeof(*pl));
976 if (!pl) { 1203 if (!pl) {
977 FC_RPORT_DBG(rport, "Received PLOGI too short\n"); 1204 FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
978 WARN_ON(1); 1205 rjt_data.reason = ELS_RJT_PROT;
979 /* XXX TBD: send reject? */ 1206 rjt_data.explan = ELS_EXPL_INV_LEN;
980 fc_frame_free(fp); 1207 goto reject;
981 return; 1208 }
1209
1210 disc = &lport->disc;
1211 mutex_lock(&disc->disc_mutex);
1212 rdata = lport->tt.rport_create(lport, sid);
1213 if (!rdata) {
1214 mutex_unlock(&disc->disc_mutex);
1215 rjt_data.reason = ELS_RJT_UNAB;
1216 rjt_data.explan = ELS_EXPL_INSUF_RES;
1217 goto reject;
982 } 1218 }
983 wwpn = get_unaligned_be64(&pl->fl_wwpn); 1219
984 wwnn = get_unaligned_be64(&pl->fl_wwnn); 1220 mutex_lock(&rdata->rp_mutex);
1221 mutex_unlock(&disc->disc_mutex);
1222
1223 rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
1224 rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);
985 1225
986 /* 1226 /*
987 * If the session was just created, possibly due to the incoming PLOGI, 1227 * If the rport was just created, possibly due to the incoming PLOGI,
988 * set the state appropriately and accept the PLOGI. 1228 * set the state appropriately and accept the PLOGI.
989 * 1229 *
990 * If we had also sent a PLOGI, and if the received PLOGI is from a 1230 * If we had also sent a PLOGI, and if the received PLOGI is from a
@@ -996,86 +1236,76 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
996 */ 1236 */
997 switch (rdata->rp_state) { 1237 switch (rdata->rp_state) {
998 case RPORT_ST_INIT: 1238 case RPORT_ST_INIT:
999 FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT " 1239 FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
1000 "- reject\n", (unsigned long long)wwpn);
1001 reject = ELS_RJT_UNSUP;
1002 break; 1240 break;
1003 case RPORT_ST_PLOGI: 1241 case RPORT_ST_PLOGI:
1004 FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n", 1242 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
1005 rdata->rp_state); 1243 if (rdata->ids.port_name < lport->wwpn) {
1006 if (wwpn < lport->wwpn) 1244 mutex_unlock(&rdata->rp_mutex);
1007 reject = ELS_RJT_INPROG; 1245 rjt_data.reason = ELS_RJT_INPROG;
1246 rjt_data.explan = ELS_EXPL_NONE;
1247 goto reject;
1248 }
1008 break; 1249 break;
1009 case RPORT_ST_PRLI: 1250 case RPORT_ST_PRLI:
1010 case RPORT_ST_READY: 1251 case RPORT_ST_READY:
1011 FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d " 1252 case RPORT_ST_ADISC:
1253 FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
1012 "- ignored for now\n", rdata->rp_state); 1254 "- ignored for now\n", rdata->rp_state);
1013 /* XXX TBD - should reset */ 1255 /* XXX TBD - should reset */
1014 break; 1256 break;
1015 case RPORT_ST_NONE: 1257 case RPORT_ST_DELETE:
1016 default: 1258 default:
1017 FC_RPORT_DBG(rport, "Received PLOGI in unexpected " 1259 FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n",
1018 "state %d\n", rdata->rp_state); 1260 rdata->rp_state);
1019 fc_frame_free(fp); 1261 fc_frame_free(rx_fp);
1020 return; 1262 goto out;
1021 break;
1022 } 1263 }
1023 1264
1024 if (reject) { 1265 /*
1025 rjt_data.reason = reject; 1266 * Get session payload size from incoming PLOGI.
1026 rjt_data.explan = ELS_EXPL_NONE; 1267 */
1027 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); 1268 rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
1028 fc_frame_free(fp); 1269 fc_frame_free(rx_fp);
1029 } else { 1270
1030 fp = fc_frame_alloc(lport, sizeof(*pl)); 1271 /*
1031 if (fp == NULL) { 1272 * Send LS_ACC. If this fails, the originator should retry.
1032 fp = rx_fp; 1273 */
1033 rjt_data.reason = ELS_RJT_UNAB; 1274 sp = lport->tt.seq_start_next(sp);
1034 rjt_data.explan = ELS_EXPL_NONE; 1275 if (!sp)
1035 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); 1276 goto out;
1036 fc_frame_free(fp); 1277 fp = fc_frame_alloc(lport, sizeof(*pl));
1037 } else { 1278 if (!fp)
1038 sp = lport->tt.seq_start_next(sp); 1279 goto out;
1039 WARN_ON(!sp); 1280
1040 fc_rport_set_name(rport, wwpn, wwnn); 1281 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1041 1282 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1042 /* 1283 ep = fc_seq_exch(sp);
1043 * Get session payload size from incoming PLOGI. 1284 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1044 */ 1285 FC_TYPE_ELS, f_ctl, 0);
1045 rport->maxframe_size = 1286 lport->tt.seq_send(lport, sp, fp);
1046 fc_plogi_get_maxframe(pl, lport->mfs); 1287 fc_rport_enter_prli(rdata);
1047 fc_frame_free(rx_fp); 1288out:
1048 fc_plogi_fill(lport, fp, ELS_LS_ACC); 1289 mutex_unlock(&rdata->rp_mutex);
1049 1290 return;
1050 /* 1291
1051 * Send LS_ACC. If this fails, 1292reject:
1052 * the originator should retry. 1293 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1053 */ 1294 fc_frame_free(fp);
1054 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1055 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1056 ep = fc_seq_exch(sp);
1057 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1058 FC_TYPE_ELS, f_ctl, 0);
1059 lport->tt.seq_send(lport, sp, fp);
1060 if (rdata->rp_state == RPORT_ST_PLOGI)
1061 fc_rport_enter_prli(rport);
1062 }
1063 }
1064} 1295}
1065 1296
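The PLOGI-state branch above resolves crossing logins: while our own
PLOGI is outstanding, an incoming PLOGI is accepted only if the peer
holds the higher WWPN; the lower-named port is told LS_RJT "in
progress" and its exchange loses. The tie-break in isolation
(illustrative values):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* lower-named port backs off; its own PLOGI is the one rejected */
    static bool accept_crossing_plogi(uint64_t peer_wwpn,
                                      uint64_t local_wwpn)
    {
            return peer_wwpn >= local_wwpn;
    }

    int main(void)
    {
            printf("%d\n", accept_crossing_plogi(0x20, 0x10)); /* 1 */
            printf("%d\n", accept_crossing_plogi(0x10, 0x20)); /* 0 */
            return 0;
    }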
1066/** 1297/**
1067 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request 1298 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1068 * @rport: Fibre Channel remote port that initiated PRLI 1299 * @rdata: private remote port data
1069 * @sp: current sequence in the PRLI exchange 1300 * @sp: current sequence in the PRLI exchange
1070 * @fp: PRLI request frame 1301 * @fp: PRLI request frame
1071 * 1302 *
1072 * Locking Note: The rport lock is expected to be held before calling 1303 * Locking Note: The rport lock is expected to be held before calling
1073 * this function. 1304 * this function.
1074 */ 1305 */
1075static void fc_rport_recv_prli_req(struct fc_rport *rport, 1306static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1076 struct fc_seq *sp, struct fc_frame *rx_fp) 1307 struct fc_seq *sp, struct fc_frame *rx_fp)
1077{ 1308{
1078 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1079 struct fc_lport *lport = rdata->local_port; 1309 struct fc_lport *lport = rdata->local_port;
1080 struct fc_exch *ep; 1310 struct fc_exch *ep;
1081 struct fc_frame *fp; 1311 struct fc_frame *fp;
@@ -1099,12 +1329,14 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
1099 1329
1100 fh = fc_frame_header_get(rx_fp); 1330 fh = fc_frame_header_get(rx_fp);
1101 1331
1102 FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n", 1332 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1103 fc_rport_state(rport)); 1333 fc_rport_state(rdata));
1104 1334
1105 switch (rdata->rp_state) { 1335 switch (rdata->rp_state) {
1106 case RPORT_ST_PRLI: 1336 case RPORT_ST_PRLI:
1337 case RPORT_ST_RTV:
1107 case RPORT_ST_READY: 1338 case RPORT_ST_READY:
1339 case RPORT_ST_ADISC:
1108 reason = ELS_RJT_NONE; 1340 reason = ELS_RJT_NONE;
1109 break; 1341 break;
1110 default: 1342 default:
@@ -1149,6 +1381,9 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
1149 pp->prli.prli_len = htons(len); 1381 pp->prli.prli_len = htons(len);
1150 len -= sizeof(struct fc_els_prli); 1382 len -= sizeof(struct fc_els_prli);
1151 1383
1384 /* reinitialize remote port roles */
1385 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
1386
1152 /* 1387 /*
1153 * Go through all the service parameter pages and build 1388 * Go through all the service parameter pages and build
1154 * response. If plen indicates longer SPP than standard, 1389 * response. If plen indicates longer SPP than standard,
@@ -1169,12 +1404,12 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
1169 fcp_parm = ntohl(rspp->spp_params); 1404 fcp_parm = ntohl(rspp->spp_params);
1170 if (fcp_parm & FCP_SPPF_RETRY) 1405 if (fcp_parm & FCP_SPPF_RETRY)
1171 rdata->flags |= FC_RP_FLAGS_RETRY; 1406 rdata->flags |= FC_RP_FLAGS_RETRY;
1172 rport->supported_classes = FC_COS_CLASS3; 1407 rdata->supported_classes = FC_COS_CLASS3;
1173 if (fcp_parm & FCP_SPPF_INIT_FCN) 1408 if (fcp_parm & FCP_SPPF_INIT_FCN)
1174 roles |= FC_RPORT_ROLE_FCP_INITIATOR; 1409 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1175 if (fcp_parm & FCP_SPPF_TARG_FCN) 1410 if (fcp_parm & FCP_SPPF_TARG_FCN)
1176 roles |= FC_RPORT_ROLE_FCP_TARGET; 1411 roles |= FC_RPORT_ROLE_FCP_TARGET;
1177 rport->roles = roles; 1412 rdata->ids.roles = roles;
1178 1413
1179 spp->spp_params = 1414 spp->spp_params =
1180 htonl(lport->service_params); 1415 htonl(lport->service_params);
@@ -1204,9 +1439,10 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
1204 */ 1439 */
1205 switch (rdata->rp_state) { 1440 switch (rdata->rp_state) {
1206 case RPORT_ST_PRLI: 1441 case RPORT_ST_PRLI:
1207 fc_rport_enter_ready(rport); 1442 fc_rport_enter_ready(rdata);
1208 break; 1443 break;
1209 case RPORT_ST_READY: 1444 case RPORT_ST_READY:
1445 case RPORT_ST_ADISC:
1210 break; 1446 break;
1211 default: 1447 default:
1212 break; 1448 break;
@@ -1217,17 +1453,17 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
1217 1453
1218/** 1454/**
1219 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request 1455 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1220 * @rport: Fibre Channel remote port that initiated PRLO 1456 * @rdata: private remote port data
1221 * @sp: current sequence in the PRLO exchange 1457 * @sp: current sequence in the PRLO exchange
1222 * @fp: PRLO request frame 1458 * @fp: PRLO request frame
1223 * 1459 *
1224 * Locking Note: The rport lock is expected to be held before calling 1460 * Locking Note: The rport lock is expected to be held before calling
1225 * this function. 1461 * this function.
1226 */ 1462 */
1227static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, 1463static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1464 struct fc_seq *sp,
1228 struct fc_frame *fp) 1465 struct fc_frame *fp)
1229{ 1466{
1230 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1231 struct fc_lport *lport = rdata->local_port; 1467 struct fc_lport *lport = rdata->local_port;
1232 1468
1233 struct fc_frame_header *fh; 1469 struct fc_frame_header *fh;
@@ -1235,13 +1471,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
1235 1471
1236 fh = fc_frame_header_get(fp); 1472 fh = fc_frame_header_get(fp);
1237 1473
1238 FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n", 1474 FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1239 fc_rport_state(rport)); 1475 fc_rport_state(rdata));
1240
1241 if (rdata->rp_state == RPORT_ST_NONE) {
1242 fc_frame_free(fp);
1243 return;
1244 }
1245 1476
1246 rjt_data.fp = NULL; 1477 rjt_data.fp = NULL;
1247 rjt_data.reason = ELS_RJT_UNAB; 1478 rjt_data.reason = ELS_RJT_UNAB;
@@ -1252,35 +1483,46 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
1252 1483
1253/** 1484/**
1254 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request 1485 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1255 * @rport: Fibre Channel remote port that initiated LOGO 1486 * @lport: local port.
1256 * @sp: current sequence in the LOGO exchange 1487 * @sp: current sequence in the LOGO exchange
1257 * @fp: LOGO request frame 1488 * @fp: LOGO request frame
1258 * 1489 *
1259 * Locking Note: The rport lock is expected to be held before calling 1490 * Locking Note: The rport lock is expected to be held before calling
1260 * this function. 1491 * this function.
1261 */ 1492 */
1262static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, 1493static void fc_rport_recv_logo_req(struct fc_lport *lport,
1494 struct fc_seq *sp,
1263 struct fc_frame *fp) 1495 struct fc_frame *fp)
1264{ 1496{
1265 struct fc_frame_header *fh; 1497 struct fc_frame_header *fh;
1266 struct fc_rport_libfc_priv *rdata = rport->dd_data; 1498 struct fc_rport_priv *rdata;
1267 struct fc_lport *lport = rdata->local_port; 1499 u32 sid;
1268
1269 fh = fc_frame_header_get(fp);
1270 1500
1271 FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n", 1501 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1272 fc_rport_state(rport));
1273 1502
1274 if (rdata->rp_state == RPORT_ST_NONE) { 1503 fh = fc_frame_header_get(fp);
1275 fc_frame_free(fp); 1504 sid = ntoh24(fh->fh_s_id);
1276 return;
1277 }
1278 1505
1279 rdata->event = RPORT_EV_LOGO; 1506 mutex_lock(&lport->disc.disc_mutex);
1280 fc_rport_state_enter(rport, RPORT_ST_NONE); 1507 rdata = lport->tt.rport_lookup(lport, sid);
1281 queue_work(rport_event_queue, &rdata->event_work); 1508 if (rdata) {
1509 mutex_lock(&rdata->rp_mutex);
1510 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1511 fc_rport_state(rdata));
1282 1512
1283 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); 1513 /*
1514 * If the remote port was created due to discovery,
1515 * log back in. It may have seen a stale RSCN about us.
1516 */
1517 if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id)
1518 fc_rport_enter_plogi(rdata);
1519 else
1520 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1521 mutex_unlock(&rdata->rp_mutex);
1522 } else
1523 FC_RPORT_ID_DBG(lport, sid,
1524 "Received LOGO from non-logged-in port\n");
1525 mutex_unlock(&lport->disc.disc_mutex);
1284 fc_frame_free(fp); 1526 fc_frame_free(fp);
1285} 1527}
1286 1528
@@ -1291,8 +1533,11 @@ static void fc_rport_flush_queue(void)
1291 1533
1292int fc_rport_init(struct fc_lport *lport) 1534int fc_rport_init(struct fc_lport *lport)
1293{ 1535{
1536 if (!lport->tt.rport_lookup)
1537 lport->tt.rport_lookup = fc_rport_lookup;
1538
1294 if (!lport->tt.rport_create) 1539 if (!lport->tt.rport_create)
1295 lport->tt.rport_create = fc_rport_rogue_create; 1540 lport->tt.rport_create = fc_rport_create;
1296 1541
1297 if (!lport->tt.rport_login) 1542 if (!lport->tt.rport_login)
1298 lport->tt.rport_login = fc_rport_login; 1543 lport->tt.rport_login = fc_rport_login;
@@ -1306,6 +1551,9 @@ int fc_rport_init(struct fc_lport *lport)
1306 if (!lport->tt.rport_flush_queue) 1551 if (!lport->tt.rport_flush_queue)
1307 lport->tt.rport_flush_queue = fc_rport_flush_queue; 1552 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1308 1553
1554 if (!lport->tt.rport_destroy)
1555 lport->tt.rport_destroy = fc_rport_destroy;
1556
1309 return 0; 1557 return 0;
1310} 1558}
1311EXPORT_SYMBOL(fc_rport_init); 1559EXPORT_SYMBOL(fc_rport_init);
@@ -1327,8 +1575,8 @@ EXPORT_SYMBOL(fc_destroy_rport);
1327 1575
1328void fc_rport_terminate_io(struct fc_rport *rport) 1576void fc_rport_terminate_io(struct fc_rport *rport)
1329{ 1577{
1330 struct fc_rport_libfc_priv *rdata = rport->dd_data; 1578 struct fc_rport_libfc_priv *rp = rport->dd_data;
1331 struct fc_lport *lport = rdata->local_port; 1579 struct fc_lport *lport = rp->local_port;
1332 1580
1333 lport->tt.exch_mgr_reset(lport, 0, rport->port_id); 1581 lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1334 lport->tt.exch_mgr_reset(lport, rport->port_id, 0); 1582 lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index a751f6230c22..8dc73c489a17 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -109,12 +109,9 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
109} 109}
110EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); 110EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
111 111
112void 112static void __iscsi_update_cmdsn(struct iscsi_session *session,
113iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) 113 uint32_t exp_cmdsn, uint32_t max_cmdsn)
114{ 114{
115 uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
116 uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
117
118 /* 115 /*
119 * standard specifies this check for when to update expected and 116 * standard specifies this check for when to update expected and
120 * max sequence numbers 117 * max sequence numbers
@@ -138,6 +135,12 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
138 iscsi_conn_queue_work(session->leadconn); 135 iscsi_conn_queue_work(session->leadconn);
139 } 136 }
140} 137}
138
139void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
140{
141 __iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn),
142 be32_to_cpu(hdr->max_cmdsn));
143}
141EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 144EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
142 145
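The window update above only ever advances ExpCmdSN/MaxCmdSN, and the
comparisons must survive 32-bit wraparound, which is why libiscsi uses
RFC 1982 serial-number ordering. One common formulation of that
ordering (equivalent in effect to the libiscsi helpers, not their
literal code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* a < b in serial-number arithmetic, tolerant of wraparound */
    static bool sna_lt(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) < 0;
    }

    int main(void)
    {
            uint32_t exp = 0xfffffffeu;
            uint32_t max = exp + 10;           /* window wraps past zero */

            printf("%d\n", sna_lt(exp, max));  /* 1: exp still below max */
            printf("%d\n", sna_lt(max, exp));  /* 0 */
            return 0;
    }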
143/** 146/**
@@ -301,8 +304,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
301 hdr->flags = ISCSI_ATTR_SIMPLE; 304 hdr->flags = ISCSI_ATTR_SIMPLE;
302 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 305 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
303 memcpy(task->lun, hdr->lun, sizeof(task->lun)); 306 memcpy(task->lun, hdr->lun, sizeof(task->lun));
304 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
305 session->cmdsn++;
306 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 307 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
307 cmd_len = sc->cmd_len; 308 cmd_len = sc->cmd_len;
308 if (cmd_len < ISCSI_CDB_SIZE) 309 if (cmd_len < ISCSI_CDB_SIZE)
@@ -388,6 +389,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
388 return -EIO; 389 return -EIO;
389 390
390 task->state = ISCSI_TASK_RUNNING; 391 task->state = ISCSI_TASK_RUNNING;
392 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
393 session->cmdsn++;
391 394
392 conn->scsicmd_pdus_cnt++; 395 conn->scsicmd_pdus_cnt++;
393 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " 396 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -499,6 +502,31 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
499 __iscsi_put_task(task); 502 __iscsi_put_task(task);
500} 503}
501 504
505/**
506 * iscsi_complete_scsi_task - finish scsi task normally
507 * @task: iscsi task for scsi cmd
508 * @exp_cmdsn: expected cmd sn in cpu format
509 * @max_cmdsn: max cmd sn in cpu format
510 *
511 * This is used when drivers do not need or cannot perform
512 * lower level pdu processing.
513 *
514 * Called with session lock
515 */
516void iscsi_complete_scsi_task(struct iscsi_task *task,
517 uint32_t exp_cmdsn, uint32_t max_cmdsn)
518{
519 struct iscsi_conn *conn = task->conn;
520
521 ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);
522
523 conn->last_recv = jiffies;
524 __iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn);
525 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
526}
527EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
528
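Note the contract in the kernel-doc: iscsi_complete_scsi_task() takes
CmdSN values already in CPU byte order, while iSCSI carries them
big-endian on the wire, so a caller converts first (the wire buffer
below is a stand-in, not a driver structure):

    #include <stdint.h>
    #include <stdio.h>

    /* portable big-endian to host conversion */
    static uint32_t from_be32(const uint8_t b[4])
    {
            return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
                   ((uint32_t)b[2] << 8) | (uint32_t)b[3];
    }

    int main(void)
    {
            uint8_t exp_wire[4] = { 0, 0, 0, 7 };   /* ExpCmdSN on the wire */
            uint8_t max_wire[4] = { 0, 0, 0, 42 };  /* MaxCmdSN on the wire */

            /* these are the two values a driver would pass through */
            printf("exp=%u max=%u\n", from_be32(exp_wire),
                   from_be32(max_wire));
            return 0;
    }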
529
502/* 530/*
503 * session lock must be held and if not called for a task that is 531 * session lock must be held and if not called for a task that is
504 * still pending or from the xmit thread, then xmit thread must 532 * still pending or from the xmit thread, then xmit thread must
@@ -857,27 +885,102 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
857 } 885 }
858} 886}
859 887
888static int iscsi_nop_out_rsp(struct iscsi_task *task,
889 struct iscsi_nopin *nop, char *data, int datalen)
890{
891 struct iscsi_conn *conn = task->conn;
892 int rc = 0;
893
894 if (conn->ping_task != task) {
895 /*
896 * If this is not in response to one of our
897 * nops then it must be from userspace.
898 */
899 if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
900 data, datalen))
901 rc = ISCSI_ERR_CONN_FAILED;
902 } else
903 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
904 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
905 return rc;
906}
907
860static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 908static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
861 char *data, int datalen) 909 char *data, int datalen)
862{ 910{
863 struct iscsi_reject *reject = (struct iscsi_reject *)hdr; 911 struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
864 struct iscsi_hdr rejected_pdu; 912 struct iscsi_hdr rejected_pdu;
913 int opcode, rc = 0;
865 914
866 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1; 915 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
867 916
868 if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) { 917 if (ntoh24(reject->dlength) > datalen ||
869 if (ntoh24(reject->dlength) > datalen) 918 ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
870 return ISCSI_ERR_PROTO; 919 iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
920 "pdu. Invalid data length (pdu dlength "
921 "%u, datalen %d\n", ntoh24(reject->dlength),
922 datalen);
923 return ISCSI_ERR_PROTO;
924 }
925 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
926 opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;
927
928 switch (reject->reason) {
929 case ISCSI_REASON_DATA_DIGEST_ERROR:
930 iscsi_conn_printk(KERN_ERR, conn,
931 "pdu (op 0x%x itt 0x%x) rejected "
932 "due to DataDigest error.\n",
933 opcode, rejected_pdu.itt);
934 break;
935 case ISCSI_REASON_IMM_CMD_REJECT:
936 iscsi_conn_printk(KERN_ERR, conn,
937 "pdu (op 0x%x itt 0x%x) rejected. Too many "
938 "immediate commands.\n",
939 opcode, rejected_pdu.itt);
940 /*
941 * We only send one TMF at a time so if the target could not
942 * handle it, then it should get fixed (RFC mandates that
943 * a target can handle one immediate TMF per conn).
944 *
945 * For nop-outs, we could have sent more than one if
946 * the target is sending us lots of nop-ins.
947 */
948 if (opcode != ISCSI_OP_NOOP_OUT)
949 return 0;
871 950
872 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { 951 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG))
873 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); 952 /*
874 iscsi_conn_printk(KERN_ERR, conn, 953 * nop-out in response to target's nop-in rejected.
875 "pdu (op 0x%x) rejected " 954 * Just resend.
876 "due to DataDigest error.\n", 955 */
877 rejected_pdu.opcode); 956 iscsi_send_nopout(conn,
957 (struct iscsi_nopin*)&rejected_pdu);
958 else {
959 struct iscsi_task *task;
960 /*
961 * Our nop as ping got dropped. We know the target
962 * and transport are ok so just clean up
963 */
964 task = iscsi_itt_to_task(conn, rejected_pdu.itt);
965 if (!task) {
966 iscsi_conn_printk(KERN_ERR, conn,
967 "Invalid pdu reject. Could "
968 "not lookup rejected task.\n");
969 rc = ISCSI_ERR_BAD_ITT;
970 } else
971 rc = iscsi_nop_out_rsp(task,
972 (struct iscsi_nopin*)&rejected_pdu,
973 NULL, 0);
878 } 974 }
975 break;
976 default:
977 iscsi_conn_printk(KERN_ERR, conn,
978 "pdu (op 0x%x itt 0x%x) rejected. Reason "
979 "code 0x%x\n", rejected_pdu.itt,
980 rejected_pdu.opcode, reject->reason);
981 break;
879 } 982 }
880 return 0; 983 return rc;
881} 984}
882 985
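The immediate-command branch above boils down to a small decision: only
rejected nop-outs are acted on, and the ITT tells the two cases apart.
A standalone model (the reserved-tag constant is a stand-in for
ISCSI_RESERVED_TAG):

    #include <stdio.h>

    #define RESERVED_ITT 0xffffffffu

    enum action { RESEND_NOPOUT, COMPLETE_PING, LEAVE_ALONE };

    /* a reserved ITT means our nop-out was itself a reply to a target
     * nop-in, so it is resent; a real ITT names one of our pings,
     * which is completed; anything that is not a nop-out is left to
     * the target to sort out */
    static enum action on_imm_reject(int is_nopout, unsigned itt)
    {
            if (!is_nopout)
                    return LEAVE_ALONE;
            return itt == RESERVED_ITT ? RESEND_NOPOUT : COMPLETE_PING;
    }

    int main(void)
    {
            printf("%d %d %d\n", on_imm_reject(1, RESERVED_ITT),
                   on_imm_reject(1, 0x10), on_imm_reject(0, 0x10));
            return 0;   /* prints "0 1 2" */
    }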
883/** 986/**
@@ -1038,15 +1141,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
1038 } 1141 }
1039 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 1142 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
1040 1143
1041 if (conn->ping_task != task) 1144 rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
1042 /* 1145 data, datalen);
1043 * If this is not in response to one of our
1044 * nops then it must be from userspace.
1045 */
1046 goto recv_pdu;
1047
1048 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
1049 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1050 break; 1146 break;
1051 default: 1147 default:
1052 rc = ISCSI_ERR_BAD_OPCODE; 1148 rc = ISCSI_ERR_BAD_OPCODE;
@@ -1212,6 +1308,9 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
1212 struct iscsi_task *task = conn->task; 1308 struct iscsi_task *task = conn->task;
1213 int rc; 1309 int rc;
1214 1310
1311 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
1312 return -ENODATA;
1313
1215 __iscsi_get_task(task); 1314 __iscsi_get_task(task);
1216 spin_unlock_bh(&conn->session->lock); 1315 spin_unlock_bh(&conn->session->lock);
1217 rc = conn->session->tt->xmit_task(task); 1316 rc = conn->session->tt->xmit_task(task);
@@ -1261,7 +1360,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
1261 int rc = 0; 1360 int rc = 0;
1262 1361
1263 spin_lock_bh(&conn->session->lock); 1362 spin_lock_bh(&conn->session->lock);
1264 if (unlikely(conn->suspend_tx)) { 1363 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1265 ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n"); 1364 ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
1266 spin_unlock_bh(&conn->session->lock); 1365 spin_unlock_bh(&conn->session->lock);
1267 return -ENODATA; 1366 return -ENODATA;
@@ -1270,7 +1369,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
1270 if (conn->task) { 1369 if (conn->task) {
1271 rc = iscsi_xmit_task(conn); 1370 rc = iscsi_xmit_task(conn);
1272 if (rc) 1371 if (rc)
1273 goto again; 1372 goto done;
1274 } 1373 }
1275 1374
1276 /* 1375 /*
@@ -1290,7 +1389,7 @@ check_mgmt:
1290 } 1389 }
1291 rc = iscsi_xmit_task(conn); 1390 rc = iscsi_xmit_task(conn);
1292 if (rc) 1391 if (rc)
1293 goto again; 1392 goto done;
1294 } 1393 }
1295 1394
1296 /* process pending command queue */ 1395 /* process pending command queue */
@@ -1311,14 +1410,14 @@ check_mgmt:
1311 list_add_tail(&conn->task->running, 1410 list_add_tail(&conn->task->running,
1312 &conn->cmdqueue); 1411 &conn->cmdqueue);
1313 conn->task = NULL; 1412 conn->task = NULL;
1314 goto again; 1413 goto done;
1315 } else 1414 } else
1316 fail_scsi_task(conn->task, DID_ABORT); 1415 fail_scsi_task(conn->task, DID_ABORT);
1317 continue; 1416 continue;
1318 } 1417 }
1319 rc = iscsi_xmit_task(conn); 1418 rc = iscsi_xmit_task(conn);
1320 if (rc) 1419 if (rc)
1321 goto again; 1420 goto done;
1322 /* 1421 /*
1323 * we could continuously get new task requests so 1422 * we could continuously get new task requests so
1324 * we need to check the mgmt queue for nops that need to 1423 * we need to check the mgmt queue for nops that need to
@@ -1344,16 +1443,14 @@ check_mgmt:
1344 conn->task->state = ISCSI_TASK_RUNNING; 1443 conn->task->state = ISCSI_TASK_RUNNING;
1345 rc = iscsi_xmit_task(conn); 1444 rc = iscsi_xmit_task(conn);
1346 if (rc) 1445 if (rc)
1347 goto again; 1446 goto done;
1348 if (!list_empty(&conn->mgmtqueue)) 1447 if (!list_empty(&conn->mgmtqueue))
1349 goto check_mgmt; 1448 goto check_mgmt;
1350 } 1449 }
1351 spin_unlock_bh(&conn->session->lock); 1450 spin_unlock_bh(&conn->session->lock);
1352 return -ENODATA; 1451 return -ENODATA;
1353 1452
1354again: 1453done:
1355 if (unlikely(conn->suspend_tx))
1356 rc = -ENODATA;
1357 spin_unlock_bh(&conn->session->lock); 1454 spin_unlock_bh(&conn->session->lock);
1358 return rc; 1455 return rc;
1359} 1456}
@@ -1474,6 +1571,12 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1474 goto fault; 1571 goto fault;
1475 } 1572 }
1476 1573
1574 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1575 reason = FAILURE_SESSION_IN_RECOVERY;
1576 sc->result = DID_REQUEUE;
1577 goto fault;
1578 }
1579
1477 if (iscsi_check_cmdsn_window_closed(conn)) { 1580 if (iscsi_check_cmdsn_window_closed(conn)) {
1478 reason = FAILURE_WINDOW_CLOSED; 1581 reason = FAILURE_WINDOW_CLOSED;
1479 goto reject; 1582 goto reject;
@@ -1497,6 +1600,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1497 } 1600 }
1498 } 1601 }
1499 if (session->tt->xmit_task(task)) { 1602 if (session->tt->xmit_task(task)) {
1603 session->cmdsn--;
1500 reason = FAILURE_SESSION_NOT_READY; 1604 reason = FAILURE_SESSION_NOT_READY;
1501 goto prepd_reject; 1605 goto prepd_reject;
1502 } 1606 }
@@ -1712,6 +1816,33 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
1712 } 1816 }
1713} 1817}
1714 1818
1819/**
1820 * iscsi_suspend_queue - suspend iscsi_queuecommand
1821 * @conn: iscsi conn to stop queueing IO on
1822 *
1823 * This grabs the session lock to make sure no one is in
1824 * xmit_task/queuecommand, and then sets suspend to prevent
1825 * new commands from being queued. This only needs to be called
1826 * by offload drivers that need to sync a path, like ep disconnect,
1827 * with iscsi_queuecommand/xmit_task. To start IO again, libiscsi
1828 * will call iscsi_start_tx and iscsi_unblock_session when in FFP.
1829 */
1830void iscsi_suspend_queue(struct iscsi_conn *conn)
1831{
1832 spin_lock_bh(&conn->session->lock);
1833 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1834 spin_unlock_bh(&conn->session->lock);
1835}
1836EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
1837
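The point of taking the session lock just to set a bit is the fence it
creates: the submit paths test the flag under the same lock, so once
iscsi_suspend_queue() returns no submitter can still be mid-flight. A
compilable model of that handshake (pthread mutex standing in for the
session lock):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct conn {
            pthread_mutex_t lock;       /* stands in for session->lock */
            bool suspended;             /* stands in for ISCSI_SUSPEND_BIT */
    };

    static void suspend_queue(struct conn *c)
    {
            pthread_mutex_lock(&c->lock);
            c->suspended = true;
            pthread_mutex_unlock(&c->lock);
    }

    static int queuecommand(struct conn *c)
    {
            int ret = 0;

            pthread_mutex_lock(&c->lock);
            if (c->suspended)
                    ret = -1;           /* maps to DID_REQUEUE above */
            /* ... otherwise prep and transmit the task ... */
            pthread_mutex_unlock(&c->lock);
            return ret;
    }

    int main(void)
    {
            struct conn c = { PTHREAD_MUTEX_INITIALIZER, false };

            printf("%d\n", queuecommand(&c));   /* 0: accepted */
            suspend_queue(&c);
            printf("%d\n", queuecommand(&c));   /* -1: requeued */
            return 0;
    }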
1838/**
1839 * iscsi_suspend_tx - suspend iscsi_data_xmit
1840 * @conn: iscsi conn to stop processing IO on.
1841 *
1842 * This function sets the suspend bit to prevent iscsi_data_xmit
1843 * from sending new IO, and if work is queued on the xmit thread
1844 * it will wait for it to be completed.
1845 */
1715void iscsi_suspend_tx(struct iscsi_conn *conn) 1846void iscsi_suspend_tx(struct iscsi_conn *conn)
1716{ 1847{
1717 struct Scsi_Host *shost = conn->session->host; 1848 struct Scsi_Host *shost = conn->session->host;
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 2742ae8a3d09..9ad38e81e343 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -124,6 +124,7 @@ static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
124 dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma); 124 dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
125 kfree(ring[i]); 125 kfree(ring[i]);
126 } 126 }
127 kfree(ring);
127} 128}
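The one-line fix frees the array of ring pointers itself, which the
loop above never touched: each element was released but the pointer
block leaked on every teardown. The alloc/free symmetry in isolation
(userspace allocators standing in for the kernel ones):

    #include <stdlib.h>

    static void ring_free(void **ring, size_t max)
    {
            size_t i;

            for (i = 0; i < max; i++)
                    free(ring[i]);      /* per-element buffers */
            free(ring);                 /* the pointer block itself,
                                           matching the added kfree(ring) */
    }

    int main(void)
    {
            size_t i, max = 4;
            void **ring = calloc(max, sizeof(*ring));

            for (i = 0; i < max; i++)
                    ring[i] = malloc(64);
            ring_free(ring, max);
            return 0;
    }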
128 129
129int srp_target_alloc(struct srp_target *target, struct device *dev, 130int srp_target_alloc(struct srp_target *target, struct device *dev,
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 1c286707dd5f..ad05d6edb8f6 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -28,4 +28,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
28 28
29lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ 29lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
30 lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \ 30 lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
31 lpfc_vport.o lpfc_debugfs.o 31 lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1877d9811831..aa10f7951634 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -312,6 +312,7 @@ struct lpfc_vport {
312#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ 312#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
313#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ 313#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
314#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ 314#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
315#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
315 316
316 uint32_t ct_flags; 317 uint32_t ct_flags;
317#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ 318#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -440,6 +441,12 @@ enum intr_type_t {
440 MSIX, 441 MSIX,
441}; 442};
442 443
444struct unsol_rcv_ct_ctx {
445 uint32_t ctxt_id;
446 uint32_t SID;
447 uint32_t oxid;
448};
449
443struct lpfc_hba { 450struct lpfc_hba {
444 /* SCSI interface function jump table entries */ 451 /* SCSI interface function jump table entries */
445 int (*lpfc_new_scsi_buf) 452 int (*lpfc_new_scsi_buf)
@@ -525,6 +532,8 @@ struct lpfc_hba {
525#define FCP_XRI_ABORT_EVENT 0x20 532#define FCP_XRI_ABORT_EVENT 0x20
526#define ELS_XRI_ABORT_EVENT 0x40 533#define ELS_XRI_ABORT_EVENT 0x40
527#define ASYNC_EVENT 0x80 534#define ASYNC_EVENT 0x80
535#define LINK_DISABLED 0x100 /* Link disabled by user */
536#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */
528 struct lpfc_dmabuf slim2p; 537 struct lpfc_dmabuf slim2p;
529 538
530 MAILBOX_t *mbox; 539 MAILBOX_t *mbox;
@@ -616,6 +625,8 @@ struct lpfc_hba {
616 uint32_t hbq_count; /* Count of configured HBQs */ 625 uint32_t hbq_count; /* Count of configured HBQs */
617 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ 626 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
618 627
628 uint32_t fcp_qidx; /* next work queue to post work to */
629
619 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 630 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
620 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */ 631 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
621 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ 632 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
@@ -682,6 +693,7 @@ struct lpfc_hba {
682 struct pci_pool *lpfc_mbuf_pool; 693 struct pci_pool *lpfc_mbuf_pool;
683 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ 694 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
684 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ 695 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
696 struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
685 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 697 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
686 698
687 mempool_t *mbox_mem_pool; 699 mempool_t *mbox_mem_pool;
@@ -763,11 +775,18 @@ struct lpfc_hba {
763/* Maximum number of events that can be outstanding at any time*/ 775/* Maximum number of events that can be outstanding at any time*/
764#define LPFC_MAX_EVT_COUNT 512 776#define LPFC_MAX_EVT_COUNT 512
765 atomic_t fast_event_count; 777 atomic_t fast_event_count;
778 uint32_t fcoe_eventtag;
779 uint32_t fcoe_eventtag_at_fcf_scan;
766 struct lpfc_fcf fcf; 780 struct lpfc_fcf fcf;
767 uint8_t fc_map[3]; 781 uint8_t fc_map[3];
768 uint8_t valid_vlan; 782 uint8_t valid_vlan;
769 uint16_t vlan_id; 783 uint16_t vlan_id;
770 struct list_head fcf_conn_rec_list; 784 struct list_head fcf_conn_rec_list;
785
786 struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */
787 struct list_head ct_ev_waiters;
788 struct unsol_rcv_ct_ctx ct_ctx[64];
789 uint32_t ctx_idx;
771}; 790};
772 791
773static inline struct Scsi_Host * 792static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index fc07be5fbce9..e1a30a16a9fa 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -394,7 +394,12 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
394 case LPFC_INIT_MBX_CMDS: 394 case LPFC_INIT_MBX_CMDS:
395 case LPFC_LINK_DOWN: 395 case LPFC_LINK_DOWN:
396 case LPFC_HBA_ERROR: 396 case LPFC_HBA_ERROR:
397 len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n"); 397 if (phba->hba_flag & LINK_DISABLED)
398 len += snprintf(buf + len, PAGE_SIZE-len,
399 "Link Down - User disabled\n");
400 else
401 len += snprintf(buf + len, PAGE_SIZE-len,
402 "Link Down\n");
398 break; 403 break;
399 case LPFC_LINK_UP: 404 case LPFC_LINK_UP:
400 case LPFC_CLEAR_LA: 405 case LPFC_CLEAR_LA:
@@ -4127,6 +4132,9 @@ struct fc_function_template lpfc_transport_functions = {
4127 .vport_disable = lpfc_vport_disable, 4132 .vport_disable = lpfc_vport_disable,
4128 4133
4129 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, 4134 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
4135
4136 .bsg_request = lpfc_bsg_request,
4137 .bsg_timeout = lpfc_bsg_timeout,
4130}; 4138};
4131 4139
4132struct fc_function_template lpfc_vport_transport_functions = { 4140struct fc_function_template lpfc_vport_transport_functions = {
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
new file mode 100644
index 000000000000..da6bf5aac9dd
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -0,0 +1,904 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#include <linux/interrupt.h>
22#include <linux/mempool.h>
23#include <linux/pci.h>
24
25#include <scsi/scsi.h>
26#include <scsi/scsi_host.h>
27#include <scsi/scsi_transport_fc.h>
28#include <scsi/scsi_bsg_fc.h>
29
30#include "lpfc_hw4.h"
31#include "lpfc_hw.h"
32#include "lpfc_sli.h"
33#include "lpfc_sli4.h"
34#include "lpfc_nl.h"
35#include "lpfc_disc.h"
36#include "lpfc_scsi.h"
37#include "lpfc.h"
38#include "lpfc_logmsg.h"
39#include "lpfc_crtn.h"
40#include "lpfc_vport.h"
41#include "lpfc_version.h"
42
43/**
44 * lpfc_bsg_rport_ct - send a CT command from a bsg request
45 * @job: fc_bsg_job to handle
46 */
47static int
48lpfc_bsg_rport_ct(struct fc_bsg_job *job)
49{
50 struct Scsi_Host *shost = job->shost;
51 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
52 struct lpfc_hba *phba = vport->phba;
53 struct lpfc_rport_data *rdata = job->rport->dd_data;
54 struct lpfc_nodelist *ndlp = rdata->pnode;
55 struct ulp_bde64 *bpl = NULL;
56 uint32_t timeout;
57 struct lpfc_iocbq *cmdiocbq = NULL;
58 struct lpfc_iocbq *rspiocbq = NULL;
59 IOCB_t *cmd;
60 IOCB_t *rsp;
61 struct lpfc_dmabuf *bmp = NULL;
62 int request_nseg;
63 int reply_nseg;
64 struct scatterlist *sgel = NULL;
65 int numbde;
66 dma_addr_t busaddr;
67 int rc = 0;
68
69 /* in case no data is transferred */
70 job->reply->reply_payload_rcv_len = 0;
71
72 if (!lpfc_nlp_get(ndlp)) {
73 job->reply->result = -ENODEV;
74 return 0;
75 }
76
77 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
78 rc = -ENODEV;
79 goto free_ndlp_exit;
80 }
81
82 spin_lock_irq(shost->host_lock);
83 cmdiocbq = lpfc_sli_get_iocbq(phba);
84 if (!cmdiocbq) {
85 rc = -ENOMEM;
86 spin_unlock_irq(shost->host_lock);
87 goto free_ndlp_exit;
88 }
89 cmd = &cmdiocbq->iocb;
90
91 rspiocbq = lpfc_sli_get_iocbq(phba);
92 if (!rspiocbq) {
93 rc = -ENOMEM;
94 goto free_cmdiocbq;
95 }
96 spin_unlock_irq(shost->host_lock);
97
98 rsp = &rspiocbq->iocb;
99
100 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
101 if (!bmp) {
102 rc = -ENOMEM;
103 spin_lock_irq(shost->host_lock);
104 goto free_rspiocbq;
105 }
106
107 spin_lock_irq(shost->host_lock);
108 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
109 if (!bmp->virt) {
110 rc = -ENOMEM;
111 goto free_bmp;
112 }
113 spin_unlock_irq(shost->host_lock);
114
115 INIT_LIST_HEAD(&bmp->list);
116 bpl = (struct ulp_bde64 *) bmp->virt;
117
118 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
119 job->request_payload.sg_cnt, DMA_TO_DEVICE);
120 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
121 busaddr = sg_dma_address(sgel);
122 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
123 bpl->tus.f.bdeSize = sg_dma_len(sgel);
124 bpl->tus.w = cpu_to_le32(bpl->tus.w);
125 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
126 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
127 bpl++;
128 }
129
130 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
131 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
132 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
133 busaddr = sg_dma_address(sgel);
134 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
135 bpl->tus.f.bdeSize = sg_dma_len(sgel);
136 bpl->tus.w = cpu_to_le32(bpl->tus.w);
137 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
138 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
139 bpl++;
140 }
141
142 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
143 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
144 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
145 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
146 cmd->un.genreq64.bdl.bdeSize =
147 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
148 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
149 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
150 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
151 cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
152 cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
153 cmd->ulpBdeCount = 1;
154 cmd->ulpLe = 1;
155 cmd->ulpClass = CLASS3;
156 cmd->ulpContext = ndlp->nlp_rpi;
157 cmd->ulpOwner = OWN_CHIP;
158 cmdiocbq->vport = phba->pport;
159 cmdiocbq->context1 = NULL;
160 cmdiocbq->context2 = NULL;
161 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
162
163 timeout = phba->fc_ratov * 2;
164 job->dd_data = cmdiocbq;
165
166 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
167 timeout + LPFC_DRVR_TIMEOUT);
168
169 if (rc != IOCB_TIMEDOUT) {
170 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
171 job->request_payload.sg_cnt, DMA_TO_DEVICE);
172 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
173 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
174 }
175
176 if (rc == IOCB_TIMEDOUT) {
177 lpfc_sli_release_iocbq(phba, rspiocbq);
178 rc = -EACCES;
179 goto free_ndlp_exit;
180 }
181
182 if (rc != IOCB_SUCCESS) {
183 rc = -EACCES;
184 goto free_outdmp;
185 }
186
187 if (rsp->ulpStatus) {
188 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
189 switch (rsp->un.ulpWord[4] & 0xff) {
190 case IOERR_SEQUENCE_TIMEOUT:
191 rc = -ETIMEDOUT;
192 break;
193 case IOERR_INVALID_RPI:
194 rc = -EFAULT;
195 break;
196 default:
197 rc = -EACCES;
198 break;
199 }
200 goto free_outdmp;
201 }
202 } else
203 job->reply->reply_payload_rcv_len =
204 rsp->un.genreq64.bdl.bdeSize;
205
206free_outdmp:
207 spin_lock_irq(shost->host_lock);
208 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
209free_bmp:
210 kfree(bmp);
211free_rspiocbq:
212 lpfc_sli_release_iocbq(phba, rspiocbq);
213free_cmdiocbq:
214 lpfc_sli_release_iocbq(phba, cmdiocbq);
215 spin_unlock_irq(shost->host_lock);
216free_ndlp_exit:
217 lpfc_nlp_put(ndlp);
218
219 /* make error code available to userspace */
220 job->reply->result = rc;
221 /* complete the job back to userspace */
222 job->job_done(job);
223
224 return 0;
225}
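/* Note on the unwind ladder above: the labels free resources in the
 * reverse order of their allocation, and the IOCB_TIMEDOUT path
 * deliberately skips releasing cmdiocbq -- job->dd_data still points
 * at it, so lpfc_bsg_timeout() can abort the outstanding iocb when
 * the transport times the job out.
 */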
226
227/**
228 * lpfc_bsg_rport_els - send an ELS command from a bsg request
229 * @job: fc_bsg_job to handle
230 */
231static int
232lpfc_bsg_rport_els(struct fc_bsg_job *job)
233{
234 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
235 struct lpfc_hba *phba = vport->phba;
236 struct lpfc_rport_data *rdata = job->rport->dd_data;
237 struct lpfc_nodelist *ndlp = rdata->pnode;
238
239 uint32_t elscmd;
240 uint32_t cmdsize;
241 uint32_t rspsize;
242 struct lpfc_iocbq *rspiocbq;
243 struct lpfc_iocbq *cmdiocbq;
244 IOCB_t *rsp;
245 uint16_t rpi = 0;
246 struct lpfc_dmabuf *pcmd;
247 struct lpfc_dmabuf *prsp;
248 struct lpfc_dmabuf *pbuflist = NULL;
249 struct ulp_bde64 *bpl;
250 int iocb_status;
251 int request_nseg;
252 int reply_nseg;
253 struct scatterlist *sgel = NULL;
254 int numbde;
255 dma_addr_t busaddr;
256 int rc = 0;
257
258 /* in case no data is transferred */
259 job->reply->reply_payload_rcv_len = 0;
260
261 if (!lpfc_nlp_get(ndlp)) {
262 rc = -ENODEV;
263 goto out;
264 }
265
266 elscmd = job->request->rqst_data.r_els.els_code;
267 cmdsize = job->request_payload.payload_len;
268 rspsize = job->reply_payload.payload_len;
269 rspiocbq = lpfc_sli_get_iocbq(phba);
270 if (!rspiocbq) {
271 lpfc_nlp_put(ndlp);
272 rc = -ENOMEM;
273 goto out;
274 }
275
276 rsp = &rspiocbq->iocb;
277 rpi = ndlp->nlp_rpi;
278
279 cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
280 ndlp->nlp_DID, elscmd);
281
282 if (!cmdiocbq) {
283 lpfc_sli_release_iocbq(phba, rspiocbq);
284 return -EIO;
285 }
286
287 job->dd_data = cmdiocbq;
288 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
289 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
290
291 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
292 kfree(pcmd);
293 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
294 kfree(prsp);
295 cmdiocbq->context2 = NULL;
296
297 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
298 bpl = (struct ulp_bde64 *) pbuflist->virt;
299
300 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
301 job->request_payload.sg_cnt, DMA_TO_DEVICE);
302
303 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
304 busaddr = sg_dma_address(sgel);
305 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
306 bpl->tus.f.bdeSize = sg_dma_len(sgel);
307 bpl->tus.w = cpu_to_le32(bpl->tus.w);
308 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
309 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
310 bpl++;
311 }
312
313 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
314 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
315 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
316 busaddr = sg_dma_address(sgel);
317 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
318 bpl->tus.f.bdeSize = sg_dma_len(sgel);
319 bpl->tus.w = cpu_to_le32(bpl->tus.w);
320 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
321 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
322 bpl++;
323 }
324
325 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
326 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
327 cmdiocbq->iocb.ulpContext = rpi;
328 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
329 cmdiocbq->context1 = NULL;
330 cmdiocbq->context2 = NULL;
331
332 iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
333 rspiocbq, (phba->fc_ratov * 2)
334 + LPFC_DRVR_TIMEOUT);
335
336 /* release the new ndlp once the iocb completes */
337 lpfc_nlp_put(ndlp);
338 if (iocb_status != IOCB_TIMEDOUT) {
339 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
340 job->request_payload.sg_cnt, DMA_TO_DEVICE);
341 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
342 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
343 }
344
345 if (iocb_status == IOCB_SUCCESS) {
346 if (rsp->ulpStatus == IOSTAT_SUCCESS) {
347 job->reply->reply_payload_rcv_len =
348 rsp->un.elsreq64.bdl.bdeSize;
349 rc = 0;
350 } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
351 struct fc_bsg_ctels_reply *els_reply;
352 /* LS_RJT data returned in word 4 */
353 uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
354
355 els_reply = &job->reply->reply_data.ctels_reply;
356 job->reply->result = 0;
357 els_reply->status = FC_CTELS_STATUS_REJECT;
358 els_reply->rjt_data.action = rjt_data[0];
359 els_reply->rjt_data.reason_code = rjt_data[1];
360 els_reply->rjt_data.reason_explanation = rjt_data[2];
361 els_reply->rjt_data.vendor_unique = rjt_data[3];
362 } else
363 rc = -EIO;
364 } else
365 rc = -EIO;
366
367 if (iocb_status != IOCB_TIMEDOUT)
368 lpfc_els_free_iocb(phba, cmdiocbq);
369
370 lpfc_sli_release_iocbq(phba, rspiocbq);
371
372out:
373 /* make error code available to userspace */
374 job->reply->result = rc;
375 /* complete the job back to userspace */
376 job->job_done(job);
377
378 return 0;
379}
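/* LS_RJT handling above simply re-exposes the reject payload: the
 * four bytes of IOCB word 4 are copied into action, reason_code,
 * reason_explanation and vendor_unique of the fc_bsg_ctels_reply,
 * and the job result is left at 0 so userspace inspects the ctels
 * status instead of an errno.
 */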
380
381struct lpfc_ct_event {
382 struct list_head node;
383 int ref;
384 wait_queue_head_t wq;
385
386 /* Event type and waiter identifiers */
387 uint32_t type_mask;
388 uint32_t req_id;
389 uint32_t reg_id;
390
391 /* next two flags are here for the auto-delete logic */
392 unsigned long wait_time_stamp;
393 int waiting;
394
395 /* seen and not seen events */
396 struct list_head events_to_get;
397 struct list_head events_to_see;
398};
399
400struct event_data {
401 struct list_head node;
402 uint32_t type;
403 uint32_t immed_dat;
404 void *data;
405 uint32_t len;
406};
407
408static struct lpfc_ct_event *
409lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
410{
411 struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
412 if (!evt)
413 return NULL;
414
415 INIT_LIST_HEAD(&evt->events_to_get);
416 INIT_LIST_HEAD(&evt->events_to_see);
417 evt->req_id = ev_req_id;
418 evt->reg_id = ev_reg_id;
419 evt->wait_time_stamp = jiffies;
420 init_waitqueue_head(&evt->wq);
421
422 return evt;
423}
424
425static void
426lpfc_ct_event_free(struct lpfc_ct_event *evt)
427{
428 struct event_data *ed;
429
430 list_del(&evt->node);
431
432 while (!list_empty(&evt->events_to_get)) {
433 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
434 list_del(&ed->node);
435 kfree(ed->data);
436 kfree(ed);
437 }
438
439 while (!list_empty(&evt->events_to_see)) {
440 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
441 list_del(&ed->node);
442 kfree(ed->data);
443 kfree(ed);
444 }
445
446 kfree(evt);
447}
448
449static inline void
450lpfc_ct_event_ref(struct lpfc_ct_event *evt)
451{
452 evt->ref++;
453}
454
455static inline void
456lpfc_ct_event_unref(struct lpfc_ct_event *evt)
457{
458 if (--evt->ref < 0)
459 lpfc_ct_event_free(evt);
460}
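/* Reference lifecycle sketch (hypothetical caller, locking elided):
 * the counter starts at zero and lpfc_ct_event_free() only runs once
 * lpfc_ct_event_unref() takes the count below zero, so a freshly
 * created waiter survives its first unref:
 *
 *     evt = lpfc_ct_event_new(reg_id, req_id);    ref == 0
 *     lpfc_ct_event_ref(evt);                     ref == 1
 *     lpfc_ct_event_unref(evt);                   ref == 0, still live
 *     lpfc_ct_event_unref(evt);                   ref == -1, freed
 */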
461
462#define SLI_CT_ELX_LOOPBACK 0x10
463
464enum ELX_LOOPBACK_CMD {
465 ELX_LOOPBACK_XRI_SETUP,
466 ELX_LOOPBACK_DATA,
467};
468
469/**
470 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 471 * @phba: pointer to lpfc hba data structure.
 472 * @pring: pointer to the SLI ring on which the iocb arrived.
 473 * @piocbq: pointer to the unsolicited iocb.

474 *
475 * This function is called when an unsolicited CT command is received. It
 476 * forwards the event to any processes registered to receive CT events.
477 */
478void
479lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
480 struct lpfc_iocbq *piocbq)
481{
482 uint32_t evt_req_id = 0;
483 uint32_t cmd;
484 uint32_t len;
485 struct lpfc_dmabuf *dmabuf = NULL;
486 struct lpfc_ct_event *evt;
487 struct event_data *evt_dat = NULL;
488 struct lpfc_iocbq *iocbq;
489 size_t offset = 0;
490 struct list_head head;
491 struct ulp_bde64 *bde;
492 dma_addr_t dma_addr;
493 int i;
494 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
495 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
496 struct lpfc_hbq_entry *hbqe;
497 struct lpfc_sli_ct_request *ct_req;
498
499 INIT_LIST_HEAD(&head);
500 list_add_tail(&head, &piocbq->list);
501
502 if (piocbq->iocb.ulpBdeCount == 0 ||
503 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
504 goto error_ct_unsol_exit;
505
506 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
507 dmabuf = bdeBuf1;
508 else {
509 dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
510 piocbq->iocb.un.cont64[0].addrLow);
511 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
512 }
513
514 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
515 evt_req_id = ct_req->FsType;
516 cmd = ct_req->CommandResponse.bits.CmdRsp;
517 len = ct_req->CommandResponse.bits.Size;
518 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
519 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
520
521 mutex_lock(&phba->ct_event_mutex);
522 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
523 if (evt->req_id != evt_req_id)
524 continue;
525
526 lpfc_ct_event_ref(evt);
527
528 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
529 if (!evt_dat) {
530 lpfc_ct_event_unref(evt);
531 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
532 "2614 Memory allocation failed for "
533 "CT event\n");
534 break;
535 }
536
537 mutex_unlock(&phba->ct_event_mutex);
538
539 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
540 /* take accumulated byte count from the last iocbq */
541 iocbq = list_entry(head.prev, typeof(*iocbq), list);
542 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
543 } else {
544 list_for_each_entry(iocbq, &head, list) {
545 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
546 evt_dat->len +=
547 iocbq->iocb.un.cont64[i].tus.f.bdeSize;
548 }
549 }
550
551 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
552 if (!evt_dat->data) {
553 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
554 "2615 Memory allocation failed for "
555 "CT event data, size %d\n",
556 evt_dat->len);
557 kfree(evt_dat);
558 mutex_lock(&phba->ct_event_mutex);
559 lpfc_ct_event_unref(evt);
560 mutex_unlock(&phba->ct_event_mutex);
561 goto error_ct_unsol_exit;
562 }
563
564 list_for_each_entry(iocbq, &head, list) {
565 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
566 bdeBuf1 = iocbq->context2;
567 bdeBuf2 = iocbq->context3;
568 }
569 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
570 int size = 0;
571 if (phba->sli3_options &
572 LPFC_SLI3_HBQ_ENABLED) {
573 if (i == 0) {
574 hbqe = (struct lpfc_hbq_entry *)
575 &iocbq->iocb.un.ulpWord[0];
576 size = hbqe->bde.tus.f.bdeSize;
577 dmabuf = bdeBuf1;
578 } else if (i == 1) {
579 hbqe = (struct lpfc_hbq_entry *)
580 &iocbq->iocb.unsli3.
581 sli3Words[4];
582 size = hbqe->bde.tus.f.bdeSize;
583 dmabuf = bdeBuf2;
584 }
585 if ((offset + size) > evt_dat->len)
586 size = evt_dat->len - offset;
587 } else {
588 size = iocbq->iocb.un.cont64[i].
589 tus.f.bdeSize;
590 bde = &iocbq->iocb.un.cont64[i];
591 dma_addr = getPaddr(bde->addrHigh,
592 bde->addrLow);
593 dmabuf = lpfc_sli_ringpostbuf_get(phba,
594 pring, dma_addr);
595 }
596 if (!dmabuf) {
597 lpfc_printf_log(phba, KERN_ERR,
598 LOG_LIBDFC, "2616 No dmabuf "
599 "found for iocbq 0x%p\n",
600 iocbq);
601 kfree(evt_dat->data);
602 kfree(evt_dat);
603 mutex_lock(&phba->ct_event_mutex);
604 lpfc_ct_event_unref(evt);
605 mutex_unlock(&phba->ct_event_mutex);
606 goto error_ct_unsol_exit;
607 }
608 memcpy((char *)(evt_dat->data) + offset,
609 dmabuf->virt, size);
610 offset += size;
611 if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
612 !(phba->sli3_options &
613 LPFC_SLI3_HBQ_ENABLED)) {
614 lpfc_sli_ringpostbuf_put(phba, pring,
615 dmabuf);
616 } else {
617 switch (cmd) {
618 case ELX_LOOPBACK_XRI_SETUP:
619 if (!(phba->sli3_options &
620 LPFC_SLI3_HBQ_ENABLED))
621 lpfc_post_buffer(phba,
622 pring,
623 1);
624 else
625 lpfc_in_buf_free(phba,
626 dmabuf);
627 break;
628 default:
629 if (!(phba->sli3_options &
630 LPFC_SLI3_HBQ_ENABLED))
631 lpfc_post_buffer(phba,
632 pring,
633 1);
634 break;
635 }
636 }
637 }
638 }
639
640 mutex_lock(&phba->ct_event_mutex);
641 if (phba->sli_rev == LPFC_SLI_REV4) {
642 evt_dat->immed_dat = phba->ctx_idx;
643 phba->ctx_idx = (phba->ctx_idx + 1) % 64;
644 phba->ct_ctx[evt_dat->immed_dat].oxid =
645 piocbq->iocb.ulpContext;
646 phba->ct_ctx[evt_dat->immed_dat].SID =
647 piocbq->iocb.un.rcvels.remoteID;
648 } else
649 evt_dat->immed_dat = piocbq->iocb.ulpContext;
650
651 evt_dat->type = FC_REG_CT_EVENT;
652 list_add(&evt_dat->node, &evt->events_to_see);
653 wake_up_interruptible(&evt->wq);
654 lpfc_ct_event_unref(evt);
655 if (evt_req_id == SLI_CT_ELX_LOOPBACK)
656 break;
657 }
658 mutex_unlock(&phba->ct_event_mutex);
659
660error_ct_unsol_exit:
661 if (!list_empty(&head))
662 list_del(&head);
663
664 return;
665}
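/* Flow summary: the routine sizes the event from the iocb chain,
 * copies every BDE buffer into one contiguous allocation, records the
 * exchange context (on SLI4 the oxid/SID pair is parked in
 * phba->ct_ctx so a reply can be issued later), then queues the
 * event_data on events_to_see and wakes any waiter registered for
 * this FsType.
 */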
666
667/**
668 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
669 * @job: SET_EVENT fc_bsg_job
670 */
671static int
672lpfc_bsg_set_event(struct fc_bsg_job *job)
673{
674 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
675 struct lpfc_hba *phba = vport->phba;
676 struct set_ct_event *event_req;
677 struct lpfc_ct_event *evt;
678 int rc = 0;
679
680 if (job->request_len <
681 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
682 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
683 "2612 Received SET_CT_EVENT below minimum "
684 "size\n");
685 return -EINVAL;
686 }
687
688 event_req = (struct set_ct_event *)
689 job->request->rqst_data.h_vendor.vendor_cmd;
690
691 mutex_lock(&phba->ct_event_mutex);
692 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
693 if (evt->reg_id == event_req->ev_reg_id) {
694 lpfc_ct_event_ref(evt);
695 evt->wait_time_stamp = jiffies;
696 break;
697 }
698 }
699 mutex_unlock(&phba->ct_event_mutex);
700
701 if (&evt->node == &phba->ct_ev_waiters) {
702 /* no event waiting struct yet - first call */
703 evt = lpfc_ct_event_new(event_req->ev_reg_id,
704 event_req->ev_req_id);
705 if (!evt) {
706 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
707 "2617 Failed allocation of event "
708 "waiter\n");
709 return -ENOMEM;
710 }
711
712 mutex_lock(&phba->ct_event_mutex);
713 list_add(&evt->node, &phba->ct_ev_waiters);
714 lpfc_ct_event_ref(evt);
715 mutex_unlock(&phba->ct_event_mutex);
716 }
717
718 evt->waiting = 1;
719 if (wait_event_interruptible(evt->wq,
720 !list_empty(&evt->events_to_see))) {
721 mutex_lock(&phba->ct_event_mutex);
722 lpfc_ct_event_unref(evt); /* release ref */
723 lpfc_ct_event_unref(evt); /* delete */
724 mutex_unlock(&phba->ct_event_mutex);
725 rc = -EINTR;
726 goto set_event_out;
727 }
728
729 evt->wait_time_stamp = jiffies;
730 evt->waiting = 0;
731
732 mutex_lock(&phba->ct_event_mutex);
733 list_move(evt->events_to_see.prev, &evt->events_to_get);
734 lpfc_ct_event_unref(evt); /* release ref */
735 mutex_unlock(&phba->ct_event_mutex);
736
737set_event_out:
738 /* set_event carries no reply payload */
739 job->reply->reply_payload_rcv_len = 0;
740 /* make error code available to userspace */
741 job->reply->result = rc;
742 /* complete the job back to userspace */
743 job->job_done(job);
744
745 return 0;
746}
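/* SET_CT_EVENT blocks: the job only completes once an event arrives
 * or the wait is interrupted, and the matched event is moved from
 * events_to_see to events_to_get for a later GET_CT_EVENT to collect.
 */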
747
748/**
749 * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
750 * @job: GET_EVENT fc_bsg_job
751 */
752static int
753lpfc_bsg_get_event(struct fc_bsg_job *job)
754{
755 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
756 struct lpfc_hba *phba = vport->phba;
757 struct get_ct_event *event_req;
758 struct get_ct_event_reply *event_reply;
759 struct lpfc_ct_event *evt;
760 struct event_data *evt_dat = NULL;
761 int rc = 0;
762
763 if (job->request_len <
764 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
765 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
766 "2613 Received GET_CT_EVENT request below "
767 "minimum size\n");
768 return -EINVAL;
769 }
770
771 event_req = (struct get_ct_event *)
772 job->request->rqst_data.h_vendor.vendor_cmd;
773
774 event_reply = (struct get_ct_event_reply *)
775 job->reply->reply_data.vendor_reply.vendor_rsp;
776
777 mutex_lock(&phba->ct_event_mutex);
778 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
779 if (evt->reg_id == event_req->ev_reg_id) {
780 if (list_empty(&evt->events_to_get))
781 break;
782 lpfc_ct_event_ref(evt);
783 evt->wait_time_stamp = jiffies;
784 evt_dat = list_entry(evt->events_to_get.prev,
785 struct event_data, node);
786 list_del(&evt_dat->node);
787 break;
788 }
789 }
790 mutex_unlock(&phba->ct_event_mutex);
791
792 if (!evt_dat) {
793 job->reply->reply_payload_rcv_len = 0;
794 rc = -ENOENT;
795 goto error_get_event_exit;
796 }
797
798 if (evt_dat->len > job->reply_payload.payload_len) {
799 evt_dat->len = job->reply_payload.payload_len;
800 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
801 "2618 Truncated event data at %d "
802 "bytes\n",
803 job->reply_payload.payload_len);
804 }
805
806 event_reply->immed_data = evt_dat->immed_dat;
807
808 if (evt_dat->len > 0)
809 job->reply->reply_payload_rcv_len =
810 sg_copy_from_buffer(job->reply_payload.sg_list,
811 job->reply_payload.sg_cnt,
812 evt_dat->data, evt_dat->len);
813 else
814 job->reply->reply_payload_rcv_len = 0;
815 rc = 0;
816
817 if (evt_dat)
818 kfree(evt_dat->data);
819 kfree(evt_dat);
820 mutex_lock(&phba->ct_event_mutex);
821 lpfc_ct_event_unref(evt);
822 mutex_unlock(&phba->ct_event_mutex);
823
824error_get_event_exit:
825 /* make error code available to userspace */
826 job->reply->result = rc;
827 /* complete the job back to userspace */
828 job->job_done(job);
829
830 return rc;
831}
832
833/**
834 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
835 * @job: fc_bsg_job to handle
836 */
837static int
838lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
839{
840 int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
841
842 switch (command) {
843 case LPFC_BSG_VENDOR_SET_CT_EVENT:
844 return lpfc_bsg_set_event(job);
845 break;
846
847 case LPFC_BSG_VENDOR_GET_CT_EVENT:
848 return lpfc_bsg_get_event(job);
849 break;
850
851 default:
852 return -EINVAL;
853 }
854}
855
856/**
857 * lpfc_bsg_request - handle a bsg request from the FC transport
858 * @job: fc_bsg_job to handle
859 */
860int
861lpfc_bsg_request(struct fc_bsg_job *job)
862{
863 uint32_t msgcode;
864 int rc = -EINVAL;
865
866 msgcode = job->request->msgcode;
867
868 switch (msgcode) {
869 case FC_BSG_HST_VENDOR:
870 rc = lpfc_bsg_hst_vendor(job);
871 break;
872 case FC_BSG_RPT_ELS:
873 rc = lpfc_bsg_rport_els(job);
874 break;
875 case FC_BSG_RPT_CT:
876 rc = lpfc_bsg_rport_ct(job);
877 break;
878 default:
879 break;
880 }
881
882 return rc;
883}
884
885/**
886 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
887 * @job: fc_bsg_job that has timed out
888 *
889 * This function just aborts the job's IOCB. The aborted IOCB will return to
 890 * the waiting function, which will handle passing the error back to userspace.
891 */
892int
893lpfc_bsg_timeout(struct fc_bsg_job *job)
894{
895 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
896 struct lpfc_hba *phba = vport->phba;
897 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
898 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
899
900 if (cmdiocb)
901 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
902
903 return 0;
904}
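The new file above is reached from userspace through the SGIOv4 bsg
nodes the FC transport registers. A minimal sketch of such a caller
follows, assuming a device path and helper name of my own choosing
(send_ct_passthrough, the rport node under /dev/bsg); it fills only
the msgcode and leaves the CT preamble fields of the rqst_data union
untouched for brevity. It illustrates the plumbing and is not part of
this patch.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>            /* SG_IO */
#include <linux/bsg.h>          /* struct sg_io_v4 */
#include <scsi/scsi_bsg_fc.h>   /* struct fc_bsg_request, FC_BSG_RPT_CT */

/* Hypothetical helper: push a raw CT frame through an rport bsg node
 * and collect the response; on success the reply payload lands in rsp. */
static int send_ct_passthrough(const char *bsg_dev, void *ct_cmd,
			       unsigned int cmd_len, void *rsp,
			       unsigned int rsp_len)
{
	struct fc_bsg_request req;
	struct fc_bsg_reply reply;
	struct sg_io_v4 io;
	int fd, rc;

	fd = open(bsg_dev, O_RDWR);	/* e.g. an rport node under /dev/bsg */
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.msgcode = FC_BSG_RPT_CT;	/* dispatched to lpfc_bsg_rport_ct() */

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (unsigned long)&req;
	io.request_len = sizeof(req);
	io.response = (unsigned long)&reply;
	io.max_response_len = sizeof(reply);
	io.dout_xferp = (unsigned long)ct_cmd;	/* CT request payload */
	io.dout_xfer_len = cmd_len;
	io.din_xferp = (unsigned long)rsp;	/* CT response buffer */
	io.din_xfer_len = rsp_len;

	rc = ioctl(fd, SG_IO, &io);
	close(fd);
	return rc;
}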
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index d2a922997c0f..0830f37409a3 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -21,9 +21,11 @@
21typedef int (*node_filter)(struct lpfc_nodelist *, void *); 21typedef int (*node_filter)(struct lpfc_nodelist *, void *);
22 22
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
25void lpfc_sli_read_link_ste(struct lpfc_hba *);
26void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); 27void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 28int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
27int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *); 29int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
28void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 30void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
29void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 31void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@@ -135,6 +137,9 @@ int lpfc_els_disc_adisc(struct lpfc_vport *);
135int lpfc_els_disc_plogi(struct lpfc_vport *); 137int lpfc_els_disc_plogi(struct lpfc_vport *);
136void lpfc_els_timeout(unsigned long); 138void lpfc_els_timeout(unsigned long);
137void lpfc_els_timeout_handler(struct lpfc_vport *); 139void lpfc_els_timeout_handler(struct lpfc_vport *);
140struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
141 uint8_t, struct lpfc_nodelist *,
142 uint32_t, uint32_t);
138void lpfc_hb_timeout_handler(struct lpfc_hba *); 143void lpfc_hb_timeout_handler(struct lpfc_hba *);
139 144
140void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 145void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -182,11 +187,12 @@ int lpfc_mbox_dev_check(struct lpfc_hba *);
182int lpfc_mbox_tmo_val(struct lpfc_hba *, int); 187int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
183void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); 188void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
184void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); 189void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
185void lpfc_init_vpi(struct lpfcMboxq *, uint16_t); 190void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
186void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); 191void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
187void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); 192void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
188void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); 193void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
189void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); 194void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
195int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
190 196
191void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, 197void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
192 uint32_t , LPFC_MBOXQ_t *); 198 uint32_t , LPFC_MBOXQ_t *);
@@ -234,6 +240,7 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
234int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, 240int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
235 struct lpfc_iocbq *, uint32_t); 241 struct lpfc_iocbq *, uint32_t);
236void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 242void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
243void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
237void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 244void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
238void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); 245void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
239int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 246int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -360,3 +367,8 @@ void lpfc_start_fdiscs(struct lpfc_hba *phba);
360#define HBA_EVENT_LINK_UP 2 367#define HBA_EVENT_LINK_UP 2
361#define HBA_EVENT_LINK_DOWN 3 368#define HBA_EVENT_LINK_DOWN 3
362 369
370/* functions to support SGIOv4/bsg interface */
371int lpfc_bsg_request(struct fc_bsg_job *);
372int lpfc_bsg_timeout(struct fc_bsg_job *);
373void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
374 struct lpfc_iocbq *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 0e532f072eb3..9df7ed38e1be 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -97,6 +97,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
97 struct list_head head; 97 struct list_head head;
98 struct lpfc_dmabuf *bdeBuf; 98 struct lpfc_dmabuf *bdeBuf;
99 99
100 lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
101
100 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { 102 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
101 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 103 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
102 } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && 104 } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f72fdf23bf1b..45337cd23feb 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -146,7 +146,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
146 * Pointer to the newly allocated/prepared els iocb data structure 146 * Pointer to the newly allocated/prepared els iocb data structure
147 * NULL - when els iocb data structure allocation/preparation failed 147 * NULL - when els iocb data structure allocation/preparation failed
148 **/ 148 **/
149static struct lpfc_iocbq * 149struct lpfc_iocbq *
150lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, 150lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
151 uint16_t cmdSize, uint8_t retry, 151 uint16_t cmdSize, uint8_t retry,
152 struct lpfc_nodelist *ndlp, uint32_t did, 152 struct lpfc_nodelist *ndlp, uint32_t did,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ed46b24a3380..e6a47e25b218 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -61,6 +61,7 @@ static uint8_t lpfcAlpaArray[] = {
61 61
62static void lpfc_disc_timeout_handler(struct lpfc_vport *); 62static void lpfc_disc_timeout_handler(struct lpfc_vport *);
63static void lpfc_disc_flush_list(struct lpfc_vport *vport); 63static void lpfc_disc_flush_list(struct lpfc_vport *vport);
64static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
64 65
65void 66void
66lpfc_terminate_rport_io(struct fc_rport *rport) 67lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -1009,9 +1010,15 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1009 spin_lock_irqsave(&phba->hbalock, flags); 1010 spin_lock_irqsave(&phba->hbalock, flags);
1010 phba->fcf.fcf_flag |= FCF_REGISTERED; 1011 phba->fcf.fcf_flag |= FCF_REGISTERED;
1011 spin_unlock_irqrestore(&phba->hbalock, flags); 1012 spin_unlock_irqrestore(&phba->hbalock, flags);
1013 /* If there is a pending FCoE event, restart FCF table scan. */
1014 if (lpfc_check_pending_fcoe_event(phba, 1)) {
1015 mempool_free(mboxq, phba->mbox_mem_pool);
1016 return;
1017 }
1012 if (vport->port_state != LPFC_FLOGI) { 1018 if (vport->port_state != LPFC_FLOGI) {
1013 spin_lock_irqsave(&phba->hbalock, flags); 1019 spin_lock_irqsave(&phba->hbalock, flags);
1014 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1020 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1021 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1015 spin_unlock_irqrestore(&phba->hbalock, flags); 1022 spin_unlock_irqrestore(&phba->hbalock, flags);
1016 lpfc_initial_flogi(vport); 1023 lpfc_initial_flogi(vport);
1017 } 1024 }
@@ -1054,6 +1061,39 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1054} 1061}
1055 1062
1056/** 1063/**
1064 * lpfc_sw_name_match - Check if the fcf switch names match.
1065 * @sw_name: pointer to switch name.
1066 * @new_fcf_record: pointer to fcf record.
1067 *
1068 * This routine compares the fcf record's switch name with the provided
1069 * switch name. If the switch names are identical this function
1070 * returns 1, otherwise it returns 0.
1071 **/
1072static uint32_t
1073lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1074{
1075 if ((sw_name[0] ==
1076 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) &&
1077 (sw_name[1] ==
1078 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) &&
1079 (sw_name[2] ==
1080 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) &&
1081 (sw_name[3] ==
1082 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) &&
1083 (sw_name[4] ==
1084 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
1085 (sw_name[5] ==
1086 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
1087 (sw_name[6] ==
1088 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
1089 (sw_name[7] ==
1090 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
1091 return 1;
1092 else
1093 return 0;
1094}
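/* The byte-wise compares above unpack the switch name from words
 * 137/138 of the FCF record via bf_get(); the name bytes live inside
 * 32-bit mailbox words, so a flat memcmp() against the caller's
 * uint8_t array would not be layout-safe.
 */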
1095
1096/**
1057 * lpfc_mac_addr_match - Check if the fcf mac address match. 1097 * lpfc_mac_addr_match - Check if the fcf mac address match.
1058 * @phba: pointer to lpfc hba data structure. 1098 * @phba: pointer to lpfc hba data structure.
1059 * @new_fcf_record: pointer to fcf record. 1099 * @new_fcf_record: pointer to fcf record.
@@ -1123,6 +1163,22 @@ lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1123 bf_get(lpfc_fcf_record_mac_5, new_fcf_record); 1163 bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1124 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1164 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1125 phba->fcf.priority = new_fcf_record->fip_priority; 1165 phba->fcf.priority = new_fcf_record->fip_priority;
1166 phba->fcf.switch_name[0] =
1167 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
1168 phba->fcf.switch_name[1] =
1169 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
1170 phba->fcf.switch_name[2] =
1171 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
1172 phba->fcf.switch_name[3] =
1173 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
1174 phba->fcf.switch_name[4] =
1175 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
1176 phba->fcf.switch_name[5] =
1177 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
1178 phba->fcf.switch_name[6] =
1179 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
1180 phba->fcf.switch_name[7] =
1181 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
1126} 1182}
1127 1183
1128/** 1184/**
@@ -1150,6 +1206,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1150 /* The FCF is already registered, start discovery */ 1206 /* The FCF is already registered, start discovery */
1151 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1207 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1152 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1208 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1209 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1153 spin_unlock_irqrestore(&phba->hbalock, flags); 1210 spin_unlock_irqrestore(&phba->hbalock, flags);
1154 if (phba->pport->port_state != LPFC_FLOGI) 1211 if (phba->pport->port_state != LPFC_FLOGI)
1155 lpfc_initial_flogi(phba->pport); 1212 lpfc_initial_flogi(phba->pport);
@@ -1239,9 +1296,12 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1239 1296
1240 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && 1297 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1241 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, 1298 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1242 new_fcf_record)) 1299 new_fcf_record))
1300 continue;
1301 if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
1302 !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
1303 new_fcf_record))
1243 continue; 1304 continue;
1244
1245 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { 1305 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1246 /* 1306 /*
1247 * If the vlan bit map does not have the bit set for the 1307 * If the vlan bit map does not have the bit set for the
@@ -1336,6 +1396,60 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1336} 1396}
1337 1397
1338/** 1398/**
1399 * lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event.
1400 * @phba: pointer to lpfc hba data structure.
1401 * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
1402 *
1403 * This function checks if any fcoe event is pending while the driver
1404 * scans FCF entries. If there is a pending event, it restarts the
1405 * FCF table scan and returns 1, otherwise it returns 0.
1406 */
1407int
1408lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1409{
1410 LPFC_MBOXQ_t *mbox;
1411 int rc;
1412 /*
1413 * If the link is up and no FCoE event occurred while in
1414 * FCF discovery, there is no need to restart FCF discovery.
1415 */
1416 if ((phba->link_state >= LPFC_LINK_UP) &&
1417 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1418 return 0;
1419
1420 spin_lock_irq(&phba->hbalock);
1421 phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
1422 spin_unlock_irq(&phba->hbalock);
1423
1424 if (phba->link_state >= LPFC_LINK_UP)
1425 lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
1426
1427 if (unreg_fcf) {
1428 spin_lock_irq(&phba->hbalock);
1429 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
1430 spin_unlock_irq(&phba->hbalock);
1431 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1432 if (!mbox) {
1433 lpfc_printf_log(phba, KERN_ERR,
1434 LOG_DISCOVERY|LOG_MBOX,
1435 "2610 UNREG_FCFI mbox allocation failed\n");
1436 return 1;
1437 }
1438 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
1439 mbox->vport = phba->pport;
1440 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
1441 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1442 if (rc == MBX_NOT_FINISHED) {
1443 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
1444 "2611 UNREG_FCFI issue mbox failed\n");
1445 mempool_free(mbox, phba->mbox_mem_pool);
1446 }
1447 }
1448
1449 return 1;
1450}
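/* Callers treat a return of 1 as "discard your mailbox context and
 * bail out": both lpfc_mbx_cmpl_reg_fcfi() and
 * lpfc_mbx_cmpl_read_fcf_record() free their mailbox and return
 * immediately when a rescan has been triggered here.
 */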
1451
1452/**
1339 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 1453 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1340 * @phba: pointer to lpfc hba data structure. 1454 * @phba: pointer to lpfc hba data structure.
1341 * @mboxq: pointer to mailbox object. 1455 * @mboxq: pointer to mailbox object.
@@ -1367,6 +1481,12 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1367 unsigned long flags; 1481 unsigned long flags;
1368 uint16_t vlan_id; 1482 uint16_t vlan_id;
1369 1483
1484 /* If there is a pending FCoE event, restart the FCF table scan */
1485 if (lpfc_check_pending_fcoe_event(phba, 0)) {
1486 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1487 return;
1488 }
1489
1370 /* Get the first SGE entry from the non-embedded DMA memory. This 1490 /* Get the first SGE entry from the non-embedded DMA memory. This
1371 * routine only uses a single SGE. 1491 * routine only uses a single SGE.
1372 */ 1492 */
@@ -1424,7 +1544,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1424 spin_lock_irqsave(&phba->hbalock, flags); 1544 spin_lock_irqsave(&phba->hbalock, flags);
1425 if (phba->fcf.fcf_flag & FCF_IN_USE) { 1545 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1426 if (lpfc_fab_name_match(phba->fcf.fabric_name, 1546 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1427 new_fcf_record) && 1547 new_fcf_record) &&
1548 lpfc_sw_name_match(phba->fcf.switch_name,
1549 new_fcf_record) &&
1428 lpfc_mac_addr_match(phba, new_fcf_record)) { 1550 lpfc_mac_addr_match(phba, new_fcf_record)) {
1429 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1551 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1430 spin_unlock_irqrestore(&phba->hbalock, flags); 1552 spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1464,9 +1586,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1464 * If there is a record with lower priority value for 1586 * If there is a record with lower priority value for
1465 * the current FCF, use that record. 1587 * the current FCF, use that record.
1466 */ 1588 */
1467 if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record) 1589 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1468 && (new_fcf_record->fip_priority < 1590 new_fcf_record) &&
1469 phba->fcf.priority)) { 1591 (new_fcf_record->fip_priority < phba->fcf.priority)) {
1470 /* Use this FCF record */ 1592 /* Use this FCF record */
1471 lpfc_copy_fcf_record(phba, new_fcf_record); 1593 lpfc_copy_fcf_record(phba, new_fcf_record);
1472 phba->fcf.addr_mode = addr_mode; 1594 phba->fcf.addr_mode = addr_mode;
@@ -1512,6 +1634,39 @@ out:
1512} 1634}
1513 1635
1514/** 1636/**
1637 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
1638 * @phba: pointer to lpfc hba data structure.
1639 * @mboxq: pointer to mailbox data structure.
1640 *
1641 * This function handles completion of init vpi mailbox command.
1642 */
1643static void
1644lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1645{
1646 struct lpfc_vport *vport = mboxq->vport;
1647 if (mboxq->u.mb.mbxStatus) {
1648 lpfc_printf_vlog(vport, KERN_ERR,
1649 LOG_MBOX,
1650 "2609 Init VPI mailbox failed 0x%x\n",
1651 mboxq->u.mb.mbxStatus);
1652 mempool_free(mboxq, phba->mbox_mem_pool);
1653 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1654 return;
1655 }
1656 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
1657
1658 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1659 lpfc_initial_fdisc(vport);
1660 else {
1661 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
1662 lpfc_printf_vlog(vport, KERN_ERR,
1663 LOG_ELS,
1664 "2606 No NPIV Fabric support\n");
1665 }
1666 return;
1667}
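/* FC_VPORT_NEEDS_INIT_VPI (added to lpfc.h above) drives this path:
 * lpfc_start_fdiscs() issues an INIT_VPI mailbox for any vport
 * carrying the flag, and this completion clears it before sending
 * the FDISC.
 */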
1668
1669/**
1515 * lpfc_start_fdiscs - send fdiscs for each vports on this port. 1670 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
1516 * @phba: pointer to lpfc hba data structure. 1671 * @phba: pointer to lpfc hba data structure.
1517 * 1672 *
@@ -1523,6 +1678,8 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
1523{ 1678{
1524 struct lpfc_vport **vports; 1679 struct lpfc_vport **vports;
1525 int i; 1680 int i;
1681 LPFC_MBOXQ_t *mboxq;
1682 int rc;
1526 1683
1527 vports = lpfc_create_vport_work_array(phba); 1684 vports = lpfc_create_vport_work_array(phba);
1528 if (vports != NULL) { 1685 if (vports != NULL) {
@@ -1540,6 +1697,29 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
1540 FC_VPORT_LINKDOWN); 1697 FC_VPORT_LINKDOWN);
1541 continue; 1698 continue;
1542 } 1699 }
1700 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
1701 mboxq = mempool_alloc(phba->mbox_mem_pool,
1702 GFP_KERNEL);
1703 if (!mboxq) {
1704 lpfc_printf_vlog(vports[i], KERN_ERR,
1705 LOG_MBOX, "2607 Failed to allocate "
1706 "init_vpi mailbox\n");
1707 continue;
1708 }
1709 lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
1710 mboxq->vport = vports[i];
1711 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
1712 rc = lpfc_sli_issue_mbox(phba, mboxq,
1713 MBX_NOWAIT);
1714 if (rc == MBX_NOT_FINISHED) {
1715 lpfc_printf_vlog(vports[i], KERN_ERR,
1716 LOG_MBOX, "2608 Failed to issue "
1717 "init_vpi mailbox\n");
1718 mempool_free(mboxq,
1719 phba->mbox_mem_pool);
1720 }
1721 continue;
1722 }
1543 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 1723 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1544 lpfc_initial_fdisc(vports[i]); 1724 lpfc_initial_fdisc(vports[i]);
1545 else { 1725 else {
@@ -1769,6 +1949,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1769 goto out; 1949 goto out;
1770 } 1950 }
1771 } else { 1951 } else {
1952 vport->port_state = LPFC_VPORT_UNKNOWN;
1772 /* 1953 /*
1773 * Add the driver's default FCF record at FCF index 0 now. This 1954 * Add the driver's default FCF record at FCF index 0 now. This
1774 * is phase 1 implementation that support FCF index 0 and driver 1955 * is phase 1 implementation that support FCF index 0 and driver
@@ -1804,6 +1985,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1804 * The driver is expected to do FIP/FCF. Call the port 1985 * The driver is expected to do FIP/FCF. Call the port
1805 * and get the FCF Table. 1986 * and get the FCF Table.
1806 */ 1987 */
1988 spin_lock_irq(&phba->hbalock);
1989 if (phba->hba_flag & FCF_DISC_INPROGRESS) {
1990 spin_unlock_irq(&phba->hbalock);
1991 return;
1992 }
1993 spin_unlock_irq(&phba->hbalock);
1807 rc = lpfc_sli4_read_fcf_record(phba, 1994 rc = lpfc_sli4_read_fcf_record(phba,
1808 LPFC_FCOE_FCF_GET_FIRST); 1995 LPFC_FCOE_FCF_GET_FIRST);
1809 if (rc) 1996 if (rc)
@@ -2113,13 +2300,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
2113 LPFC_MBOXQ_t *pmb = NULL; 2300 LPFC_MBOXQ_t *pmb = NULL;
2114 MAILBOX_t *mb; 2301 MAILBOX_t *mb;
2115 struct static_vport_info *vport_info; 2302 struct static_vport_info *vport_info;
2116 int rc, i; 2303 int rc = 0, i;
2117 struct fc_vport_identifiers vport_id; 2304 struct fc_vport_identifiers vport_id;
2118 struct fc_vport *new_fc_vport; 2305 struct fc_vport *new_fc_vport;
2119 struct Scsi_Host *shost; 2306 struct Scsi_Host *shost;
2120 struct lpfc_vport *vport; 2307 struct lpfc_vport *vport;
2121 uint16_t offset = 0; 2308 uint16_t offset = 0;
2122 uint8_t *vport_buff; 2309 uint8_t *vport_buff;
2310 struct lpfc_dmabuf *mp;
2311 uint32_t byte_count = 0;
2123 2312
2124 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2313 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2125 if (!pmb) { 2314 if (!pmb) {
@@ -2142,7 +2331,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
2142 2331
2143 vport_buff = (uint8_t *) vport_info; 2332 vport_buff = (uint8_t *) vport_info;
2144 do { 2333 do {
2145 lpfc_dump_static_vport(phba, pmb, offset); 2334 if (lpfc_dump_static_vport(phba, pmb, offset))
2335 goto out;
2336
2146 pmb->vport = phba->pport; 2337 pmb->vport = phba->pport;
2147 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); 2338 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
2148 2339
@@ -2155,17 +2346,30 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
2155 goto out; 2346 goto out;
2156 } 2347 }
2157 2348
2158 if (mb->un.varDmp.word_cnt > 2349 if (phba->sli_rev == LPFC_SLI_REV4) {
2159 sizeof(struct static_vport_info) - offset) 2350 byte_count = pmb->u.mqe.un.mb_words[5];
2160 mb->un.varDmp.word_cnt = 2351 mp = (struct lpfc_dmabuf *) pmb->context2;
2161 sizeof(struct static_vport_info) - offset; 2352 if (byte_count > sizeof(struct static_vport_info) -
2162 2353 offset)
2163 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 2354 byte_count = sizeof(struct static_vport_info)
2164 vport_buff + offset, 2355 - offset;
2165 mb->un.varDmp.word_cnt); 2356 memcpy(vport_buff + offset, mp->virt, byte_count);
2166 offset += mb->un.varDmp.word_cnt; 2357 offset += byte_count;
2358 } else {
2359 if (mb->un.varDmp.word_cnt >
2360 sizeof(struct static_vport_info) - offset)
2361 mb->un.varDmp.word_cnt =
2362 sizeof(struct static_vport_info)
2363 - offset;
2364 byte_count = mb->un.varDmp.word_cnt;
2365 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
2366 vport_buff + offset,
2367 byte_count);
2368
2369 offset += byte_count;
2370 }
2167 2371
2168 } while (mb->un.varDmp.word_cnt && 2372 } while (byte_count &&
2169 offset < sizeof(struct static_vport_info)); 2373 offset < sizeof(struct static_vport_info));
2170 2374
2171 2375
@@ -2198,7 +2402,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
2198 if (!new_fc_vport) { 2402 if (!new_fc_vport) {
2199 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2403 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2200 "0546 lpfc_create_static_vport failed to" 2404 "0546 lpfc_create_static_vport failed to"
2201 " create vport \n"); 2405 " create vport\n");
2202 continue; 2406 continue;
2203 } 2407 }
2204 2408
@@ -2207,16 +2411,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
2207 } 2411 }
2208 2412
2209out: 2413out:
2210 /*
2211 * If this is timed out command, setting NULL to context2 tell SLI
2212 * layer not to use this buffer.
2213 */
2214 spin_lock_irq(&phba->hbalock);
2215 pmb->context2 = NULL;
2216 spin_unlock_irq(&phba->hbalock);
2217 kfree(vport_info); 2414 kfree(vport_info);
2218 if (rc != MBX_TIMEOUT) 2415 if (rc != MBX_TIMEOUT) {
2416 if (pmb->context2) {
2417 mp = (struct lpfc_dmabuf *) pmb->context2;
2418 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2419 kfree(mp);
2420 }
2219 mempool_free(pmb, phba->mbox_mem_pool); 2421 mempool_free(pmb, phba->mbox_mem_pool);
2422 }
2220 2423
2221 return; 2424 return;
2222} 2425}
@@ -4360,7 +4563,7 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
4360 fcoe_param_hdr = (struct lpfc_fip_param_hdr *) 4563 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
4361 buff; 4564 buff;
4362 fcoe_param = (struct lpfc_fcoe_params *) 4565 fcoe_param = (struct lpfc_fcoe_params *)
4363 buff + sizeof(struct lpfc_fip_param_hdr); 4566 (buff + sizeof(struct lpfc_fip_param_hdr));
4364 4567
4365 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || 4568 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
4366 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) 4569 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 8a3a026667e4..ccb26724dc53 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2496,8 +2496,8 @@ typedef struct {
2496#define DMP_VPORT_REGION_SIZE 0x200 2496#define DMP_VPORT_REGION_SIZE 0x200
2497#define DMP_MBOX_OFFSET_WORD 0x5 2497#define DMP_MBOX_OFFSET_WORD 0x5
2498 2498
2499#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */ 2499#define DMP_REGION_23 0x17 /* fcoe param and port state region */
2500#define DMP_FCOEPARAM_RGN_SIZE 0x400 2500#define DMP_RGN23_SIZE 0x400
2501 2501
2502#define WAKE_UP_PARMS_REGION_ID 4 2502#define WAKE_UP_PARMS_REGION_ID 4
2503#define WAKE_UP_PARMS_WORD_SIZE 15 2503#define WAKE_UP_PARMS_WORD_SIZE 15
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2995d128f07f..3689eee04535 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -52,6 +52,31 @@ struct dma_address {
52 uint32_t addr_hi; 52 uint32_t addr_hi;
53}; 53};
54 54
55#define LPFC_SLIREV_CONF_WORD 0x58
56struct lpfc_sli_intf {
57 uint32_t word0;
58#define lpfc_sli_intf_iftype_MASK 0x00000007
59#define lpfc_sli_intf_iftype_SHIFT 0
60#define lpfc_sli_intf_iftype_WORD word0
61#define lpfc_sli_intf_rev_MASK 0x0000000f
62#define lpfc_sli_intf_rev_SHIFT 4
63#define lpfc_sli_intf_rev_WORD word0
64#define LPFC_SLIREV_CONF_SLI4 4
65#define lpfc_sli_intf_family_MASK 0x000000ff
66#define lpfc_sli_intf_family_SHIFT 8
67#define lpfc_sli_intf_family_WORD word0
68#define lpfc_sli_intf_feat1_MASK 0x000000ff
69#define lpfc_sli_intf_feat1_SHIFT 16
70#define lpfc_sli_intf_feat1_WORD word0
71#define lpfc_sli_intf_feat2_MASK 0x0000001f
72#define lpfc_sli_intf_feat2_SHIFT 24
73#define lpfc_sli_intf_feat2_WORD word0
74#define lpfc_sli_intf_valid_MASK 0x00000007
75#define lpfc_sli_intf_valid_SHIFT 29
76#define lpfc_sli_intf_valid_WORD word0
77#define LPFC_SLI_INTF_VALID 6
78};
79
55#define LPFC_SLI4_BAR0 1 80#define LPFC_SLI4_BAR0 1
56#define LPFC_SLI4_BAR1 2 81#define LPFC_SLI4_BAR1 2
57#define LPFC_SLI4_BAR2 4 82#define LPFC_SLI4_BAR2 4
@@ -1181,6 +1206,32 @@ struct fcf_record {
1181#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF 1206#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
1182#define lpfc_fcf_record_fcf_state_WORD word8 1207#define lpfc_fcf_record_fcf_state_WORD word8
1183 uint8_t vlan_bitmap[512]; 1208 uint8_t vlan_bitmap[512];
1209 uint32_t word137;
1210#define lpfc_fcf_record_switch_name_0_SHIFT 0
1211#define lpfc_fcf_record_switch_name_0_MASK 0x000000FF
1212#define lpfc_fcf_record_switch_name_0_WORD word137
1213#define lpfc_fcf_record_switch_name_1_SHIFT 8
1214#define lpfc_fcf_record_switch_name_1_MASK 0x000000FF
1215#define lpfc_fcf_record_switch_name_1_WORD word137
1216#define lpfc_fcf_record_switch_name_2_SHIFT 16
1217#define lpfc_fcf_record_switch_name_2_MASK 0x000000FF
1218#define lpfc_fcf_record_switch_name_2_WORD word137
1219#define lpfc_fcf_record_switch_name_3_SHIFT 24
1220#define lpfc_fcf_record_switch_name_3_MASK 0x000000FF
1221#define lpfc_fcf_record_switch_name_3_WORD word137
1222 uint32_t word138;
1223#define lpfc_fcf_record_switch_name_4_SHIFT 0
1224#define lpfc_fcf_record_switch_name_4_MASK 0x000000FF
1225#define lpfc_fcf_record_switch_name_4_WORD word138
1226#define lpfc_fcf_record_switch_name_5_SHIFT 8
1227#define lpfc_fcf_record_switch_name_5_MASK 0x000000FF
1228#define lpfc_fcf_record_switch_name_5_WORD word138
1229#define lpfc_fcf_record_switch_name_6_SHIFT 16
1230#define lpfc_fcf_record_switch_name_6_MASK 0x000000FF
1231#define lpfc_fcf_record_switch_name_6_WORD word138
1232#define lpfc_fcf_record_switch_name_7_SHIFT 24
1233#define lpfc_fcf_record_switch_name_7_MASK 0x000000FF
1234#define lpfc_fcf_record_switch_name_7_WORD word138
1184}; 1235};
1185 1236
1186struct lpfc_mbx_read_fcf_tbl { 1237struct lpfc_mbx_read_fcf_tbl {
@@ -1385,20 +1436,17 @@ struct lpfc_mbx_unreg_vfi {
1385 1436
1386struct lpfc_mbx_resume_rpi { 1437struct lpfc_mbx_resume_rpi {
1387 uint32_t word1; 1438 uint32_t word1;
1388#define lpfc_resume_rpi_rpi_SHIFT 0 1439#define lpfc_resume_rpi_index_SHIFT 0
1389#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF 1440#define lpfc_resume_rpi_index_MASK 0x0000FFFF
1390#define lpfc_resume_rpi_rpi_WORD word1 1441#define lpfc_resume_rpi_index_WORD word1
1442#define lpfc_resume_rpi_ii_SHIFT 30
1443#define lpfc_resume_rpi_ii_MASK 0x00000003
1444#define lpfc_resume_rpi_ii_WORD word1
1445#define RESUME_INDEX_RPI 0
1446#define RESUME_INDEX_VPI 1
1447#define RESUME_INDEX_VFI 2
1448#define RESUME_INDEX_FCFI 3
1391 uint32_t event_tag; 1449 uint32_t event_tag;
1392 uint32_t word3_rsvd;
1393 uint32_t word4_rsvd;
1394 uint32_t word5_rsvd;
1395 uint32_t word6;
1396#define lpfc_resume_rpi_vpi_SHIFT 0
1397#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
1398#define lpfc_resume_rpi_vpi_WORD word6
1399#define lpfc_resume_rpi_vfi_SHIFT 16
1400#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
1401#define lpfc_resume_rpi_vfi_WORD word6
1402}; 1450};
1403 1451
1404#define REG_FCF_INVALID_QID 0xFFFF 1452#define REG_FCF_INVALID_QID 0xFFFF
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index fc67cc65c63b..562d8cee874b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -211,7 +211,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
211 goto out_free_mbox; 211 goto out_free_mbox;
212 212
213 do { 213 do {
214 lpfc_dump_mem(phba, pmb, offset); 214 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
215 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 215 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
216 216
217 if (rc != MBX_SUCCESS) { 217 if (rc != MBX_SUCCESS) {
@@ -425,6 +425,9 @@ lpfc_config_port_post(struct lpfc_hba *phba)
425 return -EIO; 425 return -EIO;
426 } 426 }
427 427
428 /* Check if the port is disabled */
429 lpfc_sli_read_link_ste(phba);
430
428 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 431 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
429 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 432 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
430 phba->cfg_hba_queue_depth = 433 phba->cfg_hba_queue_depth =
@@ -524,27 +527,46 @@ lpfc_config_port_post(struct lpfc_hba *phba)
524 /* Set up error attention (ERATT) polling timer */ 527 /* Set up error attention (ERATT) polling timer */
525 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 528 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
526 529
527 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 530 if (phba->hba_flag & LINK_DISABLED) {
528 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 531 lpfc_printf_log(phba,
529 lpfc_set_loopback_flag(phba); 532 KERN_ERR, LOG_INIT,
530 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 533 "2598 Adapter Link is disabled.\n");
531 if (rc != MBX_SUCCESS) { 534 lpfc_down_link(phba, pmb);
532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 535 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
536 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
537 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
538 lpfc_printf_log(phba,
539 KERN_ERR, LOG_INIT,
540 "2599 Adapter failed to issue DOWN_LINK"
541 " mbox command rc 0x%x\n", rc);
542
543 mempool_free(pmb, phba->mbox_mem_pool);
544 return -EIO;
545 }
546 } else {
547 lpfc_init_link(phba, pmb, phba->cfg_topology,
548 phba->cfg_link_speed);
549 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
550 lpfc_set_loopback_flag(phba);
551 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
552 if (rc != MBX_SUCCESS) {
553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
533 "0454 Adapter failed to init, mbxCmd x%x " 554 "0454 Adapter failed to init, mbxCmd x%x "
534 "INIT_LINK, mbxStatus x%x\n", 555 "INIT_LINK, mbxStatus x%x\n",
535 mb->mbxCommand, mb->mbxStatus); 556 mb->mbxCommand, mb->mbxStatus);
536 557
537 /* Clear all interrupt enable conditions */ 558 /* Clear all interrupt enable conditions */
538 writel(0, phba->HCregaddr); 559 writel(0, phba->HCregaddr);
539 readl(phba->HCregaddr); /* flush */ 560 readl(phba->HCregaddr); /* flush */
540 /* Clear all pending interrupts */ 561 /* Clear all pending interrupts */
541 writel(0xffffffff, phba->HAregaddr); 562 writel(0xffffffff, phba->HAregaddr);
542 readl(phba->HAregaddr); /* flush */ 563 readl(phba->HAregaddr); /* flush */
543 564
544 phba->link_state = LPFC_HBA_ERROR; 565 phba->link_state = LPFC_HBA_ERROR;
545 if (rc != MBX_BUSY) 566 if (rc != MBX_BUSY)
546 mempool_free(pmb, phba->mbox_mem_pool); 567 mempool_free(pmb, phba->mbox_mem_pool);
547 return -EIO; 568 return -EIO;
569 }
548 } 570 }
549 /* MBOX buffer will be freed in mbox compl */ 571 /* MBOX buffer will be freed in mbox compl */
550 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 572 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -558,7 +580,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
558 KERN_ERR, 580 KERN_ERR,
559 LOG_INIT, 581 LOG_INIT,
560 "0456 Adapter failed to issue " 582 "0456 Adapter failed to issue "
561 "ASYNCEVT_ENABLE mbox status x%x \n.", 583 "ASYNCEVT_ENABLE mbox status x%x\n",
562 rc); 584 rc);
563 mempool_free(pmb, phba->mbox_mem_pool); 585 mempool_free(pmb, phba->mbox_mem_pool);
564 } 586 }
@@ -572,7 +594,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
572 594
573 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 595 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
575 "to get Option ROM version status x%x\n.", rc); 597 "to get Option ROM version status x%x\n", rc);
576 mempool_free(pmb, phba->mbox_mem_pool); 598 mempool_free(pmb, phba->mbox_mem_pool);
577 } 599 }
578 600
@@ -2133,6 +2155,8 @@ lpfc_online(struct lpfc_hba *phba)
2133 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2155 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2134 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2156 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2135 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2157 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2158 if (phba->sli_rev == LPFC_SLI_REV4)
2159 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2136 spin_unlock_irq(shost->host_lock); 2160 spin_unlock_irq(shost->host_lock);
2137 } 2161 }
2138 lpfc_destroy_vport_work_array(phba, vports); 2162 lpfc_destroy_vport_work_array(phba, vports);
@@ -2807,6 +2831,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2807 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 2831 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2808 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) 2832 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2809 return; 2833 return;
2834 phba->fcoe_eventtag = acqe_link->event_tag;
2810 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2835 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2811 if (!pmb) { 2836 if (!pmb) {
2812 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2837 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -2894,18 +2919,20 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2894 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 2919 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2895 int rc; 2920 int rc;
2896 2921
2922 phba->fcoe_eventtag = acqe_fcoe->event_tag;
2897 switch (event_type) { 2923 switch (event_type) {
2898 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 2924 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2899 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2925 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2900 "2546 New FCF found index 0x%x tag 0x%x \n", 2926 "2546 New FCF found index 0x%x tag 0x%x\n",
2901 acqe_fcoe->fcf_index, 2927 acqe_fcoe->fcf_index,
2902 acqe_fcoe->event_tag); 2928 acqe_fcoe->event_tag);
2903 /* 2929 /*
2904 * If the current FCF is in discovered state, 2930 * If the current FCF is in discovered state, or
2905 		 * do nothing.                                           2931 		 * FCF discovery is in progress, do nothing.
2906 */ 2932 */
2907 spin_lock_irq(&phba->hbalock); 2933 spin_lock_irq(&phba->hbalock);
2908 if (phba->fcf.fcf_flag & FCF_DISCOVERED) { 2934 if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
2935 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
2909 spin_unlock_irq(&phba->hbalock); 2936 spin_unlock_irq(&phba->hbalock);
2910 break; 2937 break;
2911 } 2938 }
@@ -2922,7 +2949,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2922 2949
2923 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 2950 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2924 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2925 "2548 FCF Table full count 0x%x tag 0x%x \n", 2952 "2548 FCF Table full count 0x%x tag 0x%x\n",
2926 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), 2953 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2927 acqe_fcoe->event_tag); 2954 acqe_fcoe->event_tag);
2928 break; 2955 break;
@@ -2930,7 +2957,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2930 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 2957 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2931 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2958 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2932 			"2549 FCF disconnected from network index 0x%x" 2959 			"2549 FCF disconnected from network index 0x%x"
2933 " tag 0x%x \n", acqe_fcoe->fcf_index, 2960 " tag 0x%x\n", acqe_fcoe->fcf_index,
2934 acqe_fcoe->event_tag); 2961 acqe_fcoe->event_tag);
2935 /* If the event is not for currently used fcf do nothing */ 2962 /* If the event is not for currently used fcf do nothing */
2936 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) 2963 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
@@ -4130,8 +4157,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
4130 /* Allocate memory for HBA structure */ 4157 /* Allocate memory for HBA structure */
4131 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 4158 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4132 if (!phba) { 4159 if (!phba) {
4133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4160 dev_err(&pdev->dev, "failed to allocate hba struct\n");
4134 "1417 Failed to allocate hba struct.\n");
4135 return NULL; 4161 return NULL;
4136 } 4162 }
4137 4163
@@ -4145,6 +4171,9 @@ lpfc_hba_alloc(struct pci_dev *pdev)
4145 return NULL; 4171 return NULL;
4146 } 4172 }
4147 4173
4174 mutex_init(&phba->ct_event_mutex);
4175 INIT_LIST_HEAD(&phba->ct_ev_waiters);
4176
4148 return phba; 4177 return phba;
4149} 4178}
4150 4179
@@ -4489,23 +4518,6 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4489 if (!phba->sli4_hba.STAregaddr) 4518 if (!phba->sli4_hba.STAregaddr)
4490 return -ENODEV; 4519 return -ENODEV;
4491 4520
4492 	/* With unrecoverable error, log the error message and return error */
4493 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4494 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4495 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4496 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4497 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4498 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4500 "1422 HBA Unrecoverable error: "
4501 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4502 "online0_reg=0x%x, online1_reg=0x%x\n",
4503 uerrlo_reg.word0, uerrhi_reg.word0,
4504 onlnreg0, onlnreg1);
4505 }
4506 return -ENODEV;
4507 }
4508
4509 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 4521 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4510 for (i = 0; i < 3000; i++) { 4522 for (i = 0; i < 3000; i++) {
4511 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); 4523 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
@@ -4545,6 +4557,23 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4545 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), 4557 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4546 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); 4558 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4547 4559
4560 	/* With unrecoverable error, log the error message and return error */
4561 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4562 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4563 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4564 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4565 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4566 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4568 "1422 HBA Unrecoverable error: "
4569 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4570 "online0_reg=0x%x, online1_reg=0x%x\n",
4571 uerrlo_reg.word0, uerrhi_reg.word0,
4572 onlnreg0, onlnreg1);
4573 }
4574 return -ENODEV;
4575 }
4576
4548 return port_error; 4577 return port_error;
4549} 4578}
4550 4579
@@ -7347,6 +7376,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7347 /* Perform post initialization setup */ 7376 /* Perform post initialization setup */
7348 lpfc_post_init_setup(phba); 7377 lpfc_post_init_setup(phba);
7349 7378
7379 /* Check if there are static vports to be created. */
7380 lpfc_create_static_vport(phba);
7381
7350 return 0; 7382 return 0;
7351 7383
7352out_disable_intr: 7384out_disable_intr:
@@ -7636,19 +7668,17 @@ static int __devinit
7636lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 7668lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7637{ 7669{
7638 int rc; 7670 int rc;
7639 uint16_t dev_id; 7671 struct lpfc_sli_intf intf;
7640 7672
7641 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id)) 7673 if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
7642 return -ENODEV; 7674 return -ENODEV;
7643 7675
7644 switch (dev_id) { 7676 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
7645 case PCI_DEVICE_ID_TIGERSHARK: 7677 (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
7646 rc = lpfc_pci_probe_one_s4(pdev, pid); 7678 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break; 7679 else
7648 default:
7649 rc = lpfc_pci_probe_one_s3(pdev, pid); 7680 rc = lpfc_pci_probe_one_s3(pdev, pid);
7650 break; 7681
7651 }
7652 return rc; 7682 return rc;
7653} 7683}
7654 7684
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 3423571dd1b3..1ab405902a18 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -52,48 +52,85 @@
52 * This routine prepares the mailbox command for dumping list of static 52 * This routine prepares the mailbox command for dumping list of static
53 * vports to be created. 53 * vports to be created.
54 **/ 54 **/
55void 55int
56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, 56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
57 uint16_t offset) 57 uint16_t offset)
58{ 58{
59 MAILBOX_t *mb; 59 MAILBOX_t *mb;
60 void *ctx; 60 struct lpfc_dmabuf *mp;
61 61
62 mb = &pmb->u.mb; 62 mb = &pmb->u.mb;
63 ctx = pmb->context2;
64 63
65 /* Setup to dump vport info region */ 64 /* Setup to dump vport info region */
66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 65 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
67 mb->mbxCommand = MBX_DUMP_MEMORY; 66 mb->mbxCommand = MBX_DUMP_MEMORY;
68 mb->un.varDmp.cv = 1;
69 mb->un.varDmp.type = DMP_NV_PARAMS; 67 mb->un.varDmp.type = DMP_NV_PARAMS;
70 mb->un.varDmp.entry_index = offset; 68 mb->un.varDmp.entry_index = offset;
71 mb->un.varDmp.region_id = DMP_REGION_VPORT; 69 mb->un.varDmp.region_id = DMP_REGION_VPORT;
72 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
73 mb->un.varDmp.co = 0;
74 mb->un.varDmp.resp_offset = 0;
75 pmb->context2 = ctx;
76 mb->mbxOwner = OWN_HOST; 70 mb->mbxOwner = OWN_HOST;
77 71
78 return; 72 /* For SLI3 HBAs data is embedded in mailbox */
73 if (phba->sli_rev != LPFC_SLI_REV4) {
74 mb->un.varDmp.cv = 1;
75 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
76 return 0;
77 }
78
 79 	/* For SLI4 HBAs the driver needs to allocate memory */
80 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
81 if (mp)
82 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
83
84 if (!mp || !mp->virt) {
85 kfree(mp);
86 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
87 "2605 lpfc_dump_static_vport: memory"
88 " allocation failed\n");
89 return 1;
90 }
91 memset(mp->virt, 0, LPFC_BPL_SIZE);
92 INIT_LIST_HEAD(&mp->list);
93 /* save address for completion */
94 pmb->context2 = (uint8_t *) mp;
95 mb->un.varWords[3] = putPaddrLow(mp->phys);
96 mb->un.varWords[4] = putPaddrHigh(mp->phys);
97 mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
98
99 return 0;
100}
101
102/**
103 * lpfc_down_link - Bring down the HBA's link.
104 * @phba: pointer to lpfc hba data structure.
105 * @pmb: pointer to the driver internal queue element for mailbox command.
106 *
107 * This routine prepares a mailbox command to bring down the HBA link.
108 **/
109void
110lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
111{
112 MAILBOX_t *mb;
113 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
114 mb = &pmb->u.mb;
115 mb->mbxCommand = MBX_DOWN_LINK;
116 mb->mbxOwner = OWN_HOST;
79} 117}
80 118
81/** 119/**
82 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory 120 * lpfc_dump_mem - Prepare a mailbox command for reading a region.
83 * @phba: pointer to lpfc hba data structure. 121 * @phba: pointer to lpfc hba data structure.
84 * @pmb: pointer to the driver internal queue element for mailbox command. 122 * @pmb: pointer to the driver internal queue element for mailbox command.
85 * @offset: offset for dumping VPD memory mailbox command. 123 * @offset: offset into the region.
124 * @region_id: config region id.
86 * 125 *
87 * The dump mailbox command provides a method for the device driver to obtain 126 * The dump mailbox command provides a method for the device driver to obtain
88 * various types of information from the HBA device. 127 * various types of information from the HBA device.
89 * 128 *
90 * This routine prepares the mailbox command for dumping HBA Vital Product 129 * This routine prepares the mailbox command for dumping HBA's config region.
91 * Data (VPD) memory. This mailbox command is to be used for retrieving a
92 * portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address
93 * offset specified by the offset parameter.
94 **/ 130 **/
95void 131void
96lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) 132lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
133 uint16_t region_id)
97{ 134{
98 MAILBOX_t *mb; 135 MAILBOX_t *mb;
99 void *ctx; 136 void *ctx;
@@ -107,7 +144,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
107 mb->un.varDmp.cv = 1; 144 mb->un.varDmp.cv = 1;
108 mb->un.varDmp.type = DMP_NV_PARAMS; 145 mb->un.varDmp.type = DMP_NV_PARAMS;
109 mb->un.varDmp.entry_index = offset; 146 mb->un.varDmp.entry_index = offset;
110 mb->un.varDmp.region_id = DMP_REGION_VPD; 147 mb->un.varDmp.region_id = region_id;
111 mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t)); 148 mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
112 mb->un.varDmp.co = 0; 149 mb->un.varDmp.co = 0;
113 mb->un.varDmp.resp_offset = 0; 150 mb->un.varDmp.resp_offset = 0;
@@ -1789,6 +1826,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1789 1826
1790/** 1827/**
1791 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command 1828 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
1829 * @phba: pointer to the hba structure to init the VPI for.
1792 * @mbox: pointer to lpfc mbox command to initialize. 1830 * @mbox: pointer to lpfc mbox command to initialize.
1793 * @vpi: VPI to be initialized. 1831 * @vpi: VPI to be initialized.
1794 * 1832 *
@@ -1799,11 +1837,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1799 * successful virtual NPort login. 1837 * successful virtual NPort login.
1800 **/ 1838 **/
1801void 1839void
1802lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi) 1840lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1803{ 1841{
1804 memset(mbox, 0, sizeof(*mbox)); 1842 memset(mbox, 0, sizeof(*mbox));
1805 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); 1843 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1806 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi); 1844 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
1845 vpi + phba->vpi_base);
1846 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
1847 phba->pport->vfi + phba->vfi_base);
1807} 1848}
1808 1849
1809/** 1850/**
@@ -1852,7 +1893,7 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1852 /* dump_fcoe_param failed to allocate memory */ 1893 /* dump_fcoe_param failed to allocate memory */
1853 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 1894 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1854 "2569 lpfc_dump_fcoe_param: memory" 1895 "2569 lpfc_dump_fcoe_param: memory"
1855 " allocation failed \n"); 1896 " allocation failed\n");
1856 return 1; 1897 return 1;
1857 } 1898 }
1858 1899
@@ -1864,8 +1905,8 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1864 1905
1865 mb->mbxCommand = MBX_DUMP_MEMORY; 1906 mb->mbxCommand = MBX_DUMP_MEMORY;
1866 mb->un.varDmp.type = DMP_NV_PARAMS; 1907 mb->un.varDmp.type = DMP_NV_PARAMS;
1867 mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM; 1908 mb->un.varDmp.region_id = DMP_REGION_23;
1868 mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE; 1909 mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
1869 mb->un.varWords[3] = putPaddrLow(mp->phys); 1910 mb->un.varWords[3] = putPaddrLow(mp->phys);
1870 mb->un.varWords[4] = putPaddrHigh(mp->phys); 1911 mb->un.varWords[4] = putPaddrHigh(mp->phys);
1871 return 0; 1912 return 0;
@@ -1938,9 +1979,7 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1938 memset(mbox, 0, sizeof(*mbox)); 1979 memset(mbox, 0, sizeof(*mbox));
1939 resume_rpi = &mbox->u.mqe.un.resume_rpi; 1980 resume_rpi = &mbox->u.mqe.un.resume_rpi;
1940 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); 1981 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
1941 bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi); 1982 bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
1942 bf_set(lpfc_resume_rpi_vpi, resume_rpi, 1983 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
1943 ndlp->vport->vpi + ndlp->vport->phba->vpi_base); 1984 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
1944 bf_set(lpfc_resume_rpi_vfi, resume_rpi,
1945 ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
1946} 1985}
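
lpfc_init_vpi() gaining a phba argument is what lets it translate the driver's zero-based VPI/VFI into the absolute indices the firmware expects. A trivial sketch of the assumed translation:

#include <stdint.h>

/*
 * Sketch (assumed semantics): the driver allocates zero-based logical
 * indices, while SLI4 mailbox commands address firmware resources with
 * absolute ones, so the base reported by the adapter is added when the
 * command is built, e.g. vpi + phba->vpi_base in lpfc_init_vpi().
 */
static inline uint16_t lpfc_abs_index(uint16_t logical, uint16_t base)
{
	return logical + base;
}
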
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index e198c917c13e..a1b6db6016da 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -110,17 +110,28 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
110 sizeof(struct lpfc_nodelist)); 110 sizeof(struct lpfc_nodelist));
111 if (!phba->nlp_mem_pool) 111 if (!phba->nlp_mem_pool)
112 goto fail_free_mbox_pool; 112 goto fail_free_mbox_pool;
113 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", 113
114 if (phba->sli_rev == LPFC_SLI_REV4) {
115 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
114 phba->pcidev, 116 phba->pcidev,
115 LPFC_HDR_BUF_SIZE, align, 0); 117 LPFC_HDR_BUF_SIZE, align, 0);
116 if (!phba->lpfc_hrb_pool) 118 if (!phba->lpfc_hrb_pool)
117 goto fail_free_nlp_mem_pool; 119 goto fail_free_nlp_mem_pool;
118 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", 120
121 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
119 phba->pcidev, 122 phba->pcidev,
120 LPFC_DATA_BUF_SIZE, align, 0); 123 LPFC_DATA_BUF_SIZE, align, 0);
121 if (!phba->lpfc_drb_pool) 124 if (!phba->lpfc_drb_pool)
122 goto fail_free_hbq_pool; 125 goto fail_free_hrb_pool;
123 126 phba->lpfc_hbq_pool = NULL;
127 } else {
128 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
129 phba->pcidev, LPFC_BPL_SIZE, align, 0);
130 if (!phba->lpfc_hbq_pool)
131 goto fail_free_nlp_mem_pool;
132 phba->lpfc_hrb_pool = NULL;
133 phba->lpfc_drb_pool = NULL;
134 }
124 /* vpi zero is reserved for the physical port so add 1 to max */ 135 /* vpi zero is reserved for the physical port so add 1 to max */
125 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; 136 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
126 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); 137 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
@@ -132,7 +143,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
132 fail_free_dbq_pool: 143 fail_free_dbq_pool:
133 pci_pool_destroy(phba->lpfc_drb_pool); 144 pci_pool_destroy(phba->lpfc_drb_pool);
134 phba->lpfc_drb_pool = NULL; 145 phba->lpfc_drb_pool = NULL;
135 fail_free_hbq_pool: 146 fail_free_hrb_pool:
136 pci_pool_destroy(phba->lpfc_hrb_pool); 147 pci_pool_destroy(phba->lpfc_hrb_pool);
137 phba->lpfc_hrb_pool = NULL; 148 phba->lpfc_hrb_pool = NULL;
138 fail_free_nlp_mem_pool: 149 fail_free_nlp_mem_pool:
@@ -176,11 +187,17 @@ lpfc_mem_free(struct lpfc_hba *phba)
176 187
177 /* Free HBQ pools */ 188 /* Free HBQ pools */
178 lpfc_sli_hbqbuf_free_all(phba); 189 lpfc_sli_hbqbuf_free_all(phba);
179 pci_pool_destroy(phba->lpfc_drb_pool); 190 if (phba->lpfc_drb_pool)
191 pci_pool_destroy(phba->lpfc_drb_pool);
180 phba->lpfc_drb_pool = NULL; 192 phba->lpfc_drb_pool = NULL;
181 pci_pool_destroy(phba->lpfc_hrb_pool); 193 if (phba->lpfc_hrb_pool)
194 pci_pool_destroy(phba->lpfc_hrb_pool);
182 phba->lpfc_hrb_pool = NULL; 195 phba->lpfc_hrb_pool = NULL;
183 196
197 if (phba->lpfc_hbq_pool)
198 pci_pool_destroy(phba->lpfc_hbq_pool);
199 phba->lpfc_hbq_pool = NULL;
200
184 /* Free NLP memory pool */ 201 /* Free NLP memory pool */
185 mempool_destroy(phba->nlp_mem_pool); 202 mempool_destroy(phba->nlp_mem_pool);
186 phba->nlp_mem_pool = NULL; 203 phba->nlp_mem_pool = NULL;
@@ -380,7 +397,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
380 if (!hbqbp) 397 if (!hbqbp)
381 return NULL; 398 return NULL;
382 399
383 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, 400 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
384 &hbqbp->dbuf.phys); 401 &hbqbp->dbuf.phys);
385 if (!hbqbp->dbuf.virt) { 402 if (!hbqbp->dbuf.virt) {
386 kfree(hbqbp); 403 kfree(hbqbp);
@@ -405,7 +422,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
405void 422void
406lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) 423lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
407{ 424{
408 pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); 425 pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
409 kfree(hbqbp); 426 kfree(hbqbp);
410 return; 427 return;
411} 428}
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index 27d1a88a98fe..d655ed3eebef 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -177,3 +177,23 @@ struct temp_event {
177 uint32_t data; 177 uint32_t data;
178}; 178};
179 179
180/* bsg definitions */
181#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
182#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
183
184struct set_ct_event {
185 uint32_t command;
186 uint32_t ev_req_id;
187 uint32_t ev_reg_id;
188};
189
190struct get_ct_event {
191 uint32_t command;
192 uint32_t ev_reg_id;
193 uint32_t ev_req_id;
194};
195
196struct get_ct_event_reply {
197 uint32_t immed_data;
198 uint32_t type;
199};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index da59c4f0168f..61d089703806 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -2142,7 +2142,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2142 } else if (resp_info & RESID_OVER) { 2142 } else if (resp_info & RESID_OVER) {
2143 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 2143 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2144 "9028 FCP command x%x residual overrun error. " 2144 "9028 FCP command x%x residual overrun error. "
2145 "Data: x%x x%x \n", cmnd->cmnd[0], 2145 "Data: x%x x%x\n", cmnd->cmnd[0],
2146 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 2146 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
2147 host_status = DID_ERROR; 2147 host_status = DID_ERROR;
2148 2148
@@ -2843,7 +2843,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2843 dif_op_str[scsi_get_prot_op(cmnd)]); 2843 dif_op_str[scsi_get_prot_op(cmnd)]);
2844 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2844 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2845 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " 2845 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2846 "%02x %02x %02x %02x %02x \n", 2846 "%02x %02x %02x %02x %02x\n",
2847 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], 2847 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2848 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], 2848 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2849 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], 2849 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
@@ -2871,7 +2871,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2871 dif_op_str[scsi_get_prot_op(cmnd)]); 2871 dif_op_str[scsi_get_prot_op(cmnd)]);
2872 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2872 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2873 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x " 2873 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2874 "%02x %02x %02x %02x %02x \n", 2874 "%02x %02x %02x %02x %02x\n",
2875 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], 2875 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2876 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], 2876 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2877 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], 2877 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
@@ -3584,6 +3584,7 @@ struct scsi_host_template lpfc_template = {
3584 .use_clustering = ENABLE_CLUSTERING, 3584 .use_clustering = ENABLE_CLUSTERING,
3585 .shost_attrs = lpfc_hba_attrs, 3585 .shost_attrs = lpfc_hba_attrs,
3586 .max_sectors = 0xFFFF, 3586 .max_sectors = 0xFFFF,
3587 .vendor_id = LPFC_NL_VENDOR_ID,
3587}; 3588};
3588 3589
3589struct scsi_host_template lpfc_vport_template = { 3590struct scsi_host_template lpfc_vport_template = {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index acc43b061ba1..43cbe336f1f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4139,7 +4139,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4139 return -EIO; 4139 return -EIO;
4140 } 4140 }
4141 data_length = mqe->un.mb_words[5]; 4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE) { 4142 if (data_length > DMP_RGN23_SIZE) {
4143 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4143 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4144 kfree(mp); 4144 kfree(mp);
4145 return -EIO; 4145 return -EIO;
@@ -4304,7 +4304,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4304 */ 4304 */
4305 if (lpfc_sli4_read_fcoe_params(phba, mboxq)) 4305 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4306 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 4306 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4307 "2570 Failed to read FCoE parameters \n"); 4307 "2570 Failed to read FCoE parameters\n");
4308 4308
4309 /* Issue READ_REV to collect vpd and FW information. */ 4309 /* Issue READ_REV to collect vpd and FW information. */
4310 vpd_size = PAGE_SIZE; 4310 vpd_size = PAGE_SIZE;
@@ -4522,12 +4522,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4522 lpfc_sli4_rb_setup(phba); 4522 lpfc_sli4_rb_setup(phba);
4523 4523
4524 /* Start the ELS watchdog timer */ 4524 /* Start the ELS watchdog timer */
4525 /* 4525 mod_timer(&vport->els_tmofunc,
4526 * The driver for SLI4 is not yet ready to process timeouts 4526 jiffies + HZ * (phba->fc_ratov * 2));
4527 * or interrupts. Once it is, the comment bars can be removed.
4528 */
4529 /* mod_timer(&vport->els_tmofunc,
4530 * jiffies + HZ * (phba->fc_ratov*2)); */
4531 4527
4532 /* Start heart beat timer */ 4528 /* Start heart beat timer */
4533 mod_timer(&phba->hb_tmofunc, 4529 mod_timer(&phba->hb_tmofunc,
@@ -4706,13 +4702,13 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4706 4702
4707 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4703 spin_lock_irqsave(&phba->hbalock, drvr_flag);
4708 if (!pmbox) { 4704 if (!pmbox) {
4705 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4709 /* processing mbox queue from intr_handler */ 4706 /* processing mbox queue from intr_handler */
4710 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 4707 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4711 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4708 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4712 return MBX_SUCCESS; 4709 return MBX_SUCCESS;
4713 } 4710 }
4714 processing_queue = 1; 4711 processing_queue = 1;
4715 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4716 pmbox = lpfc_mbox_get(phba); 4712 pmbox = lpfc_mbox_get(phba);
4717 if (!pmbox) { 4713 if (!pmbox) {
4718 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4714 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -5279,6 +5275,18 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5279 unsigned long iflags; 5275 unsigned long iflags;
5280 int rc; 5276 int rc;
5281 5277
5278 rc = lpfc_mbox_dev_check(phba);
5279 if (unlikely(rc)) {
5280 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5281 "(%d):2544 Mailbox command x%x (x%x) "
5282 "cannot issue Data: x%x x%x\n",
5283 mboxq->vport ? mboxq->vport->vpi : 0,
5284 mboxq->u.mb.mbxCommand,
5285 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5286 psli->sli_flag, flag);
5287 goto out_not_finished;
5288 }
5289
5282 /* Detect polling mode and jump to a handler */ 5290 /* Detect polling mode and jump to a handler */
5283 if (!phba->sli4_hba.intr_enable) { 5291 if (!phba->sli4_hba.intr_enable) {
5284 if (flag == MBX_POLL) 5292 if (flag == MBX_POLL)
@@ -5338,17 +5346,6 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5338 psli->sli_flag, flag); 5346 psli->sli_flag, flag);
5339 goto out_not_finished; 5347 goto out_not_finished;
5340 } 5348 }
5341 rc = lpfc_mbox_dev_check(phba);
5342 if (unlikely(rc)) {
5343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5344 "(%d):2544 Mailbox command x%x (x%x) "
5345 "cannot issue Data: x%x x%x\n",
5346 mboxq->vport ? mboxq->vport->vpi : 0,
5347 mboxq->u.mb.mbxCommand,
5348 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5349 psli->sli_flag, flag);
5350 goto out_not_finished;
5351 }
5352 5349
5353 /* Put the mailbox command to the driver internal FIFO */ 5350 /* Put the mailbox command to the driver internal FIFO */
5354 psli->slistat.mbox_busy++; 5351 psli->slistat.mbox_busy++;
@@ -5817,19 +5814,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5817/** 5814/**
5818 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 5815 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5819 * @phba: Pointer to HBA context object. 5816 * @phba: Pointer to HBA context object.
5820 * @piocb: Pointer to command iocb.
5821 * 5817 *
5822 * This routine performs a round robin SCSI command to SLI4 FCP WQ index 5818 * This routine performs a round robin SCSI command to SLI4 FCP WQ index
5823 * distribution. 5819 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
5820 * held.
5824 * 5821 *
5825 * Return: index into SLI4 fast-path FCP queue index. 5822 * Return: index into SLI4 fast-path FCP queue index.
5826 **/ 5823 **/
5827static uint32_t 5824static uint32_t
5828lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) 5825lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
5829{ 5826{
5830 static uint32_t fcp_qidx; 5827 ++phba->fcp_qidx;
5828 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
5829 phba->fcp_qidx = 0;
5831 5830
5832 return fcp_qidx++ % phba->cfg_fcp_wq_count; 5831 return phba->fcp_qidx;
5833} 5832}
5834 5833
5835/** 5834/**
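
Moving fcp_qidx from a function-static into the phba is more than style: a static is shared by every adapter in the system and is raced on SMP, while the per-device counter is only advanced under the hbalock the caller already holds. A stand-alone sketch of the pattern:

#include <stdint.h>

/*
 * Round-robin state kept per device instead of in a function-static,
 * advanced only while the caller holds the device lock (here,
 * __lpfc_sli_issue_iocb_s4() holds the hbalock).
 */
struct dev_state {
	uint32_t qidx;		/* last queue handed out */
	uint32_t wq_count;	/* number of work queues */
};

/* caller must hold the per-device lock */
static uint32_t next_wq(struct dev_state *d)
{
	if (++d->qidx >= d->wq_count)
		d->qidx = 0;
	return d->qidx;
}
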
@@ -6156,7 +6155,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6156 return IOCB_ERROR; 6155 return IOCB_ERROR;
6157 6156
6158 if (piocb->iocb_flag & LPFC_IO_FCP) { 6157 if (piocb->iocb_flag & LPFC_IO_FCP) {
6159 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb); 6158 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
6160 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe)) 6159 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
6161 return IOCB_ERROR; 6160 return IOCB_ERROR;
6162 } else { 6161 } else {
@@ -6327,7 +6326,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
6327 KERN_ERR, 6326 KERN_ERR,
6328 LOG_SLI, 6327 LOG_SLI,
6329 "0346 Ring %d handler: unexpected ASYNC_STATUS" 6328 "0346 Ring %d handler: unexpected ASYNC_STATUS"
6330 " evt_code 0x%x \n" 6329 " evt_code 0x%x\n"
6331 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 6330 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
6332 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 6331 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
6333 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 6332 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
@@ -6790,6 +6789,33 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
6790 6789
6791 6790
6792/** 6791/**
6792 * lpfc_sli_bemem_bcopy - SLI memory copy function
6793 * @srcp: Source memory pointer.
6794 * @destp: Destination memory pointer.
 6795 * @cnt: Number of bytes to be copied, processed as 32-bit words.
6796 *
 6797 * This function copies data from a structure held in big-endian
 6798 * representation into local (CPU) endianness.
 6799 * This function can be called with or without a lock held.
6800 **/
6801void
6802lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
6803{
6804 uint32_t *src = srcp;
6805 uint32_t *dest = destp;
6806 uint32_t ldata;
6807 int i;
6808
6809 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
6810 ldata = *src;
6811 ldata = be32_to_cpu(ldata);
6812 *dest = ldata;
6813 src++;
6814 dest++;
6815 }
6816}
6817
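
A hypothetical call to make the word-at-a-time swap concrete (the caller and data are illustrative, not from the patch):

/*
 * Illustrative only: 'wire' stands in for an object the HBA wrote in
 * big-endian word order.  After the copy, host[i] == be32_to_cpu(wire[i])
 * for each word.  Note cnt is a byte count walked in 32-bit strides.
 */
uint32_t wire[4];	/* filled in by hardware */
uint32_t host[4];

lpfc_sli_bemem_bcopy(wire, host, sizeof(wire));
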
6818/**
6793 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 6819 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
6794 * @phba: Pointer to HBA context object. 6820 * @phba: Pointer to HBA context object.
6795 * @pring: Pointer to driver SLI ring object. 6821 * @pring: Pointer to driver SLI ring object.
@@ -7678,12 +7704,6 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7678 "online0_reg=0x%x, online1_reg=0x%x\n", 7704 "online0_reg=0x%x, online1_reg=0x%x\n",
7679 uerr_sta_lo, uerr_sta_hi, 7705 uerr_sta_lo, uerr_sta_hi,
7680 onlnreg0, onlnreg1); 7706 onlnreg0, onlnreg1);
7681 /* TEMP: as the driver error recover logic is not
7682 * fully developed, we just log the error message
7683 * and the device error attention action is now
7684 * temporarily disabled.
7685 */
7686 return 0;
7687 phba->work_status[0] = uerr_sta_lo; 7707 phba->work_status[0] = uerr_sta_lo;
7688 phba->work_status[1] = uerr_sta_hi; 7708 phba->work_status[1] = uerr_sta_hi;
7689 /* Set the driver HA work bitmap */ 7709 /* Set the driver HA work bitmap */
@@ -9499,8 +9519,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9499 eq->host_index = 0; 9519 eq->host_index = 0;
9500 eq->hba_index = 0; 9520 eq->hba_index = 0;
9501 9521
9502 if (rc != MBX_TIMEOUT) 9522 mempool_free(mbox, phba->mbox_mem_pool);
9503 mempool_free(mbox, phba->mbox_mem_pool);
9504 return status; 9523 return status;
9505} 9524}
9506 9525
@@ -9604,10 +9623,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9604 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 9623 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9605 cq->host_index = 0; 9624 cq->host_index = 0;
9606 cq->hba_index = 0; 9625 cq->hba_index = 0;
9607out:
9608 9626
9609 if (rc != MBX_TIMEOUT) 9627out:
9610 mempool_free(mbox, phba->mbox_mem_pool); 9628 mempool_free(mbox, phba->mbox_mem_pool);
9611 return status; 9629 return status;
9612} 9630}
9613 9631
@@ -9712,8 +9730,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9712 /* link the mq onto the parent cq child list */ 9730 /* link the mq onto the parent cq child list */
9713 list_add_tail(&mq->list, &cq->child_list); 9731 list_add_tail(&mq->list, &cq->child_list);
9714out: 9732out:
9715 if (rc != MBX_TIMEOUT) 9733 mempool_free(mbox, phba->mbox_mem_pool);
9716 mempool_free(mbox, phba->mbox_mem_pool);
9717 return status; 9734 return status;
9718} 9735}
9719 9736
@@ -9795,8 +9812,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9795 /* link the wq onto the parent cq child list */ 9812 /* link the wq onto the parent cq child list */
9796 list_add_tail(&wq->list, &cq->child_list); 9813 list_add_tail(&wq->list, &cq->child_list);
9797out: 9814out:
9798 if (rc != MBX_TIMEOUT) 9815 mempool_free(mbox, phba->mbox_mem_pool);
9799 mempool_free(mbox, phba->mbox_mem_pool);
9800 return status; 9816 return status;
9801} 9817}
9802 9818
@@ -9970,8 +9986,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9970 list_add_tail(&drq->list, &cq->child_list); 9986 list_add_tail(&drq->list, &cq->child_list);
9971 9987
9972out: 9988out:
9973 if (rc != MBX_TIMEOUT) 9989 mempool_free(mbox, phba->mbox_mem_pool);
9974 mempool_free(mbox, phba->mbox_mem_pool);
9975 return status; 9990 return status;
9976} 9991}
9977 9992
@@ -10026,8 +10041,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
10026 10041
10027 /* Remove eq from any list */ 10042 /* Remove eq from any list */
10028 list_del_init(&eq->list); 10043 list_del_init(&eq->list);
10029 if (rc != MBX_TIMEOUT) 10044 mempool_free(mbox, eq->phba->mbox_mem_pool);
10030 mempool_free(mbox, eq->phba->mbox_mem_pool);
10031 return status; 10045 return status;
10032} 10046}
10033 10047
@@ -10080,8 +10094,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
10080 } 10094 }
10081 /* Remove cq from any list */ 10095 /* Remove cq from any list */
10082 list_del_init(&cq->list); 10096 list_del_init(&cq->list);
10083 if (rc != MBX_TIMEOUT) 10097 mempool_free(mbox, cq->phba->mbox_mem_pool);
10084 mempool_free(mbox, cq->phba->mbox_mem_pool);
10085 return status; 10098 return status;
10086} 10099}
10087 10100
@@ -10134,8 +10147,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
10134 } 10147 }
10135 /* Remove mq from any list */ 10148 /* Remove mq from any list */
10136 list_del_init(&mq->list); 10149 list_del_init(&mq->list);
10137 if (rc != MBX_TIMEOUT) 10150 mempool_free(mbox, mq->phba->mbox_mem_pool);
10138 mempool_free(mbox, mq->phba->mbox_mem_pool);
10139 return status; 10151 return status;
10140} 10152}
10141 10153
@@ -10187,8 +10199,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10187 } 10199 }
10188 /* Remove wq from any list */ 10200 /* Remove wq from any list */
10189 list_del_init(&wq->list); 10201 list_del_init(&wq->list);
10190 if (rc != MBX_TIMEOUT) 10202 mempool_free(mbox, wq->phba->mbox_mem_pool);
10191 mempool_free(mbox, wq->phba->mbox_mem_pool);
10192 return status; 10203 return status;
10193} 10204}
10194 10205
@@ -10258,8 +10269,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10258 } 10269 }
10259 list_del_init(&hrq->list); 10270 list_del_init(&hrq->list);
10260 list_del_init(&drq->list); 10271 list_del_init(&drq->list);
10261 if (rc != MBX_TIMEOUT) 10272 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10262 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10263 return status; 10273 return status;
10264} 10274}
10265 10275
@@ -10933,6 +10943,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10933 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 10943 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10934 if (first_iocbq) { 10944 if (first_iocbq) {
10935 /* Initialize the first IOCB. */ 10945 /* Initialize the first IOCB. */
10946 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
10936 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 10947 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
10937 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 10948 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
10938 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 10949 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
@@ -10945,6 +10956,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10945 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 10956 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10946 LPFC_DATA_BUF_SIZE; 10957 LPFC_DATA_BUF_SIZE;
10947 first_iocbq->iocb.un.rcvels.remoteID = sid; 10958 first_iocbq->iocb.un.rcvels.remoteID = sid;
10959 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10960 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
10948 } 10961 }
10949 iocbq = first_iocbq; 10962 iocbq = first_iocbq;
10950 /* 10963 /*
@@ -10961,6 +10974,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10961 iocbq->iocb.ulpBdeCount++; 10974 iocbq->iocb.ulpBdeCount++;
10962 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = 10975 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
10963 LPFC_DATA_BUF_SIZE; 10976 LPFC_DATA_BUF_SIZE;
10977 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10978 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
10964 } else { 10979 } else {
10965 iocbq = lpfc_sli_get_iocbq(vport->phba); 10980 iocbq = lpfc_sli_get_iocbq(vport->phba);
10966 if (!iocbq) { 10981 if (!iocbq) {
@@ -10978,6 +10993,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10978 iocbq->iocb.ulpBdeCount = 1; 10993 iocbq->iocb.ulpBdeCount = 1;
10979 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 10994 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10980 LPFC_DATA_BUF_SIZE; 10995 LPFC_DATA_BUF_SIZE;
10996 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10997 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
10981 iocbq->iocb.un.rcvels.remoteID = sid; 10998 iocbq->iocb.un.rcvels.remoteID = sid;
10982 list_add_tail(&iocbq->list, &first_iocbq->list); 10999 list_add_tail(&iocbq->list, &first_iocbq->list);
10983 } 11000 }
@@ -11324,7 +11341,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11324 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11341 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11325 if (!mboxq) 11342 if (!mboxq)
11326 return -ENOMEM; 11343 return -ENOMEM;
11327 lpfc_init_vpi(mboxq, vpi); 11344 lpfc_init_vpi(phba, mboxq, vpi);
11328 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 11345 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11329 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11346 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11330 if (rc != MBX_TIMEOUT) 11347 if (rc != MBX_TIMEOUT)
@@ -11519,6 +11536,7 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11519 uint32_t alloc_len, req_len; 11536 uint32_t alloc_len, req_len;
11520 struct lpfc_mbx_read_fcf_tbl *read_fcf; 11537 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11521 11538
11539 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
11522 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11540 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11523 if (!mboxq) { 11541 if (!mboxq) {
11524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11570,7 +11588,140 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11570 if (rc == MBX_NOT_FINISHED) { 11588 if (rc == MBX_NOT_FINISHED) {
11571 lpfc_sli4_mbox_cmd_free(phba, mboxq); 11589 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11572 error = -EIO; 11590 error = -EIO;
11573 } else 11591 } else {
11592 spin_lock_irq(&phba->hbalock);
11593 phba->hba_flag |= FCF_DISC_INPROGRESS;
11594 spin_unlock_irq(&phba->hbalock);
11574 error = 0; 11595 error = 0;
11596 }
11575 return error; 11597 return error;
11576} 11598}
11599
11600/**
11601 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
11602 * @phba: pointer to lpfc hba data structure.
11603 *
11604 * This function reads region 23 and parses its TLV records to decide
11605 * whether the user disabled the port. If the TLV indicates the
11606 * port is disabled, the hba_flag is set accordingly.
11607 **/
11608void
11609lpfc_sli_read_link_ste(struct lpfc_hba *phba)
11610{
11611 LPFC_MBOXQ_t *pmb = NULL;
11612 MAILBOX_t *mb;
11613 uint8_t *rgn23_data = NULL;
11614 uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
11615 int rc;
11616
11617 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11618 if (!pmb) {
11619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11620 			"2600 lpfc_sli_read_link_ste failed to"
11621 " allocate mailbox memory\n");
11622 goto out;
11623 }
11624 mb = &pmb->u.mb;
11625
11626 /* Get adapter Region 23 data */
11627 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
11628 if (!rgn23_data)
11629 goto out;
11630
11631 do {
11632 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
11633 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
11634
11635 if (rc != MBX_SUCCESS) {
11636 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11637 "2601 lpfc_sli_read_link_ste failed to"
11638 " read config region 23 rc 0x%x Status 0x%x\n",
11639 rc, mb->mbxStatus);
11640 mb->un.varDmp.word_cnt = 0;
11641 }
11642 /*
11643 		 * The dump may return zero words when finished or after a
11644 		 * mailbox error; either way we are done.
11645 */
11646 if (mb->un.varDmp.word_cnt == 0)
11647 break;
11648 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
11649 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
11650
11651 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
11652 rgn23_data + offset,
11653 mb->un.varDmp.word_cnt);
11654 offset += mb->un.varDmp.word_cnt;
11655 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
11656
11657 data_size = offset;
11658 offset = 0;
11659
11660 if (!data_size)
11661 goto out;
11662
11663 /* Check the region signature first */
11664 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
11665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11666 "2619 Config region 23 has bad signature\n");
11667 goto out;
11668 }
11669 offset += 4;
11670
11671 /* Check the data structure version */
11672 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
11673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11674 "2620 Config region 23 has bad version\n");
11675 goto out;
11676 }
11677 offset += 4;
11678
11679 /* Parse TLV entries in the region */
11680 while (offset < data_size) {
11681 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
11682 break;
11683 /*
11684 * If the TLV is not driver specific TLV or driver id is
11685 * not linux driver id, skip the record.
11686 */
11687 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
11688 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
11689 (rgn23_data[offset + 3] != 0)) {
11690 offset += rgn23_data[offset + 1] * 4 + 4;
11691 continue;
11692 }
11693
11694 /* Driver found a driver specific TLV in the config region */
11695 sub_tlv_len = rgn23_data[offset + 1] * 4;
11696 offset += 4;
11697 tlv_offset = 0;
11698
11699 /*
11700 * Search for configured port state sub-TLV.
11701 */
11702 while ((offset < data_size) &&
11703 (tlv_offset < sub_tlv_len)) {
11704 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
11705 offset += 4;
11706 tlv_offset += 4;
11707 break;
11708 }
11709 if (rgn23_data[offset] != PORT_STE_TYPE) {
11710 				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
11711 				offset += rgn23_data[offset + 1] * 4 + 4;
11712 continue;
11713 }
11714
11715 			/* This HBA has a PORT_STE record configured */
11716 if (!rgn23_data[offset + 2])
11717 phba->hba_flag |= LINK_DISABLED;
11718
11719 goto out;
11720 }
11721 }
11722out:
11723 if (pmb)
11724 mempool_free(pmb, phba->mbox_mem_pool);
11725 kfree(rgn23_data);
11726 return;
11727}
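
The record layout the parser above assumes: byte 0 is the record type, byte 1 the payload length in 32-bit words, 0xff ends the list, and each record occupies its payload plus a 4-byte header. A generic, stand-alone sketch of the same walk (layout inferred from the parser, not a quoted lpfc helper):

#include <stddef.h>
#include <stdint.h>

static int find_tlv(const uint8_t *data, size_t size, uint8_t want,
		    const uint8_t **payload)
{
	size_t off = 0;

	while (off + 4 <= size && data[off] != 0xff) {
		if (data[off] == want) {
			*payload = &data[off + 2];
			return 0;
		}
		off += data[off + 1] * 4 + 4;	/* skip header + payload */
	}
	return -1;				/* not found */
}
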
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3b276b47d18f..b5f4ba1a5c27 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -132,6 +132,7 @@ struct lpfc_sli4_link {
132 132
133struct lpfc_fcf { 133struct lpfc_fcf {
134 uint8_t fabric_name[8]; 134 uint8_t fabric_name[8];
135 uint8_t switch_name[8];
135 uint8_t mac_addr[6]; 136 uint8_t mac_addr[6];
136 uint16_t fcf_indx; 137 uint16_t fcf_indx;
137 uint16_t fcfi; 138 uint16_t fcfi;
@@ -150,6 +151,10 @@ struct lpfc_fcf {
150#define LPFC_REGION23_SIGNATURE "RG23" 151#define LPFC_REGION23_SIGNATURE "RG23"
151#define LPFC_REGION23_VERSION 1 152#define LPFC_REGION23_VERSION 1
152#define LPFC_REGION23_LAST_REC 0xff 153#define LPFC_REGION23_LAST_REC 0xff
154#define DRIVER_SPECIFIC_TYPE 0xA2
155#define LINUX_DRIVER_ID 0x20
156#define PORT_STE_TYPE 0x1
157
153struct lpfc_fip_param_hdr { 158struct lpfc_fip_param_hdr {
154 uint8_t type; 159 uint8_t type;
155#define FCOE_PARAM_TYPE 0xA0 160#define FCOE_PARAM_TYPE 0xA0
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 41094e02304b..9ae20af4bdb7 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.3" 21#define LPFC_DRIVER_VERSION "8.3.4"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e0b49922193e..606efa767548 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -313,22 +313,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
313 goto error_out; 313 goto error_out;
314 } 314 }
315 315
316 /*
317 * In SLI4, the vpi must be activated before it can be used
318 * by the port.
319 */
320 if (phba->sli_rev == LPFC_SLI_REV4) {
321 rc = lpfc_sli4_init_vpi(phba, vpi);
322 if (rc) {
323 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
324 "1838 Failed to INIT_VPI on vpi %d "
325 "status %d\n", vpi, rc);
326 rc = VPORT_NORESOURCES;
327 lpfc_free_vpi(phba, vpi);
328 goto error_out;
329 }
330 }
331
332 /* Assign an unused board number */ 316 /* Assign an unused board number */
333 if ((instance = lpfc_get_instance()) < 0) { 317 if ((instance = lpfc_get_instance()) < 0) {
334 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 318 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -367,12 +351,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
367 goto error_out; 351 goto error_out;
368 } 352 }
369 353
370 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); 354 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
371 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); 355 u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
372 if (fc_vport->node_name != 0)
373 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
374 if (fc_vport->port_name != 0)
375 u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
376 356
377 memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8); 357 memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
378 memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8); 358 memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
@@ -404,7 +384,34 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
404 *(struct lpfc_vport **)fc_vport->dd_data = vport; 384 *(struct lpfc_vport **)fc_vport->dd_data = vport;
405 vport->fc_vport = fc_vport; 385 vport->fc_vport = fc_vport;
406 386
387 /*
388 * In SLI4, the vpi must be activated before it can be used
389 * by the port.
390 */
391 if ((phba->sli_rev == LPFC_SLI_REV4) &&
392 (pport->vfi_state & LPFC_VFI_REGISTERED)) {
393 rc = lpfc_sli4_init_vpi(phba, vpi);
394 if (rc) {
395 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
396 "1838 Failed to INIT_VPI on vpi %d "
397 "status %d\n", vpi, rc);
398 rc = VPORT_NORESOURCES;
399 lpfc_free_vpi(phba, vpi);
400 goto error_out;
401 }
402 } else if (phba->sli_rev == LPFC_SLI_REV4) {
403 /*
404 * Driver cannot INIT_VPI now. Set the flags to
 405 * init_vpi when reg_vfi completes.
406 */
407 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
408 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
409 rc = VPORT_OK;
410 goto out;
411 }
412
407 if ((phba->link_state < LPFC_LINK_UP) || 413 if ((phba->link_state < LPFC_LINK_UP) ||
414 (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
408 (phba->fc_topology == TOPOLOGY_LOOP)) { 415 (phba->fc_topology == TOPOLOGY_LOOP)) {
409 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); 416 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
410 rc = VPORT_OK; 417 rc = VPORT_OK;
@@ -661,7 +668,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
661 lpfc_printf_log(vport->phba, KERN_WARNING, 668 lpfc_printf_log(vport->phba, KERN_WARNING,
662 LOG_VPORT, 669 LOG_VPORT,
663 "1829 CT command failed to " 670 "1829 CT command failed to "
664 "delete objects on fabric. \n"); 671 "delete objects on fabric\n");
665 } 672 }
666 /* First look for the Fabric ndlp */ 673 /* First look for the Fabric ndlp */
667 ndlp = lpfc_findnode_did(vport, Fabric_DID); 674 ndlp = lpfc_findnode_did(vport, Fabric_DID);
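The vport hunks above move INIT_VPI until after the vport is fully wired into fc_vport->dd_data, and skip it entirely when the physical port's VFI is not yet registered, marking the vport FC_VPORT_NEEDS_INIT_VPI so the init happens once REG_VFI completes. A minimal sketch of that defer-until-ready pattern follows; the names are illustrative, not the lpfc API:

/* Hypothetical sketch of the deferred-init flow used above. */
#include <stdbool.h>

enum { NEEDS_INIT_VPI = 0x1 };

struct sketch_vport {
	unsigned int flags;
	bool vfi_registered;	/* stands in for LPFC_VFI_REGISTERED */
};

static int init_vpi_now(struct sketch_vport *v)
{
	return 0;	/* the real driver issues an INIT_VPI mailbox here */
}

static int activate_vpi(struct sketch_vport *v)
{
	if (v->vfi_registered)
		return init_vpi_now(v);	/* resource ready: init immediately */
	v->flags |= NEEDS_INIT_VPI;	/* otherwise defer until REG_VFI */
	return 0;			/* succeed; vport stays link-down */
}

The practical effect of the reordering is that a deferred or failed INIT_VPI can no longer leave a half-constructed vport behind, since dd_data and fc_vport are linked before the mailbox command is attempted.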
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 35a13867495e..d95d2f274cb3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -94,7 +94,7 @@ _base_fault_reset_work(struct work_struct *work)
94 int rc; 94 int rc;
95 95
96 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 96 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
97 if (ioc->ioc_reset_in_progress) 97 if (ioc->shost_recovery)
98 goto rearm_timer; 98 goto rearm_timer;
99 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 99 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
100 100
@@ -687,6 +687,14 @@ _base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
687 ioc->mask_interrupts = 0; 687 ioc->mask_interrupts = 0;
688} 688}
689 689
690union reply_descriptor {
691 u64 word;
692 struct {
693 u32 low;
694 u32 high;
695 } u;
696};
697
690/** 698/**
691 * _base_interrupt - MPT adapter (IOC) specific interrupt handler. 699 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
692 * @irq: irq number (not used) 700 * @irq: irq number (not used)
@@ -698,47 +706,38 @@ _base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
698static irqreturn_t 706static irqreturn_t
699_base_interrupt(int irq, void *bus_id) 707_base_interrupt(int irq, void *bus_id)
700{ 708{
701 union reply_descriptor {
702 u64 word;
703 struct {
704 u32 low;
705 u32 high;
706 } u;
707 };
708 union reply_descriptor rd; 709 union reply_descriptor rd;
709 u32 post_index, post_index_next, completed_cmds; 710 u32 completed_cmds;
710 u8 request_desript_type; 711 u8 request_desript_type;
711 u16 smid; 712 u16 smid;
712 u8 cb_idx; 713 u8 cb_idx;
713 u32 reply; 714 u32 reply;
714 u8 VF_ID; 715 u8 VF_ID;
715 int i;
716 struct MPT2SAS_ADAPTER *ioc = bus_id; 716 struct MPT2SAS_ADAPTER *ioc = bus_id;
717 Mpi2ReplyDescriptorsUnion_t *rpf;
717 718
718 if (ioc->mask_interrupts) 719 if (ioc->mask_interrupts)
719 return IRQ_NONE; 720 return IRQ_NONE;
720 721
721 post_index = ioc->reply_post_host_index; 722 rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
722 request_desript_type = ioc->reply_post_free[post_index]. 723 request_desript_type = rpf->Default.ReplyFlags
723 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 724 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
724 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 725 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
725 return IRQ_NONE; 726 return IRQ_NONE;
726 727
727 completed_cmds = 0; 728 completed_cmds = 0;
728 do { 729 do {
729 rd.word = ioc->reply_post_free[post_index].Words; 730 rd.word = rpf->Words;
730 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) 731 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
731 goto out; 732 goto out;
732 reply = 0; 733 reply = 0;
733 cb_idx = 0xFF; 734 cb_idx = 0xFF;
734 smid = le16_to_cpu(ioc->reply_post_free[post_index]. 735 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
735 Default.DescriptorTypeDependent1); 736 VF_ID = rpf->Default.VF_ID;
736 VF_ID = ioc->reply_post_free[post_index].
737 Default.VF_ID;
738 if (request_desript_type == 737 if (request_desript_type ==
739 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { 738 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
740 reply = le32_to_cpu(ioc->reply_post_free[post_index]. 739 reply = le32_to_cpu
741 AddressReply.ReplyFrameAddress); 740 (rpf->AddressReply.ReplyFrameAddress);
742 } else if (request_desript_type == 741 } else if (request_desript_type ==
743 MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER) 742 MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
744 goto next; 743 goto next;
@@ -765,21 +764,27 @@ _base_interrupt(int irq, void *bus_id)
765 0 : ioc->reply_free_host_index + 1; 764 0 : ioc->reply_free_host_index + 1;
766 ioc->reply_free[ioc->reply_free_host_index] = 765 ioc->reply_free[ioc->reply_free_host_index] =
767 cpu_to_le32(reply); 766 cpu_to_le32(reply);
767 wmb();
768 writel(ioc->reply_free_host_index, 768 writel(ioc->reply_free_host_index,
769 &ioc->chip->ReplyFreeHostIndex); 769 &ioc->chip->ReplyFreeHostIndex);
770 wmb();
771 } 770 }
772 771
773 next: 772 next:
774 post_index_next = (post_index == (ioc->reply_post_queue_depth - 773
775 1)) ? 0 : post_index + 1; 774 rpf->Words = ULLONG_MAX;
775 ioc->reply_post_host_index = (ioc->reply_post_host_index ==
776 (ioc->reply_post_queue_depth - 1)) ? 0 :
777 ioc->reply_post_host_index + 1;
776 request_desript_type = 778 request_desript_type =
777 ioc->reply_post_free[post_index_next].Default.ReplyFlags 779 ioc->reply_post_free[ioc->reply_post_host_index].Default.
778 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 780 ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
779 completed_cmds++; 781 completed_cmds++;
780 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 782 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
781 goto out; 783 goto out;
782 post_index = post_index_next; 784 if (!ioc->reply_post_host_index)
785 rpf = ioc->reply_post_free;
786 else
787 rpf++;
783 } while (1); 788 } while (1);
784 789
785 out: 790 out:
@@ -787,19 +792,8 @@ _base_interrupt(int irq, void *bus_id)
787 if (!completed_cmds) 792 if (!completed_cmds)
788 return IRQ_NONE; 793 return IRQ_NONE;
789 794
790 /* reply post descriptor handling */
791 post_index_next = ioc->reply_post_host_index;
792 for (i = 0 ; i < completed_cmds; i++) {
793 post_index = post_index_next;
794 /* poison the reply post descriptor */
795 ioc->reply_post_free[post_index_next].Words = ULLONG_MAX;
796 post_index_next = (post_index ==
797 (ioc->reply_post_queue_depth - 1))
798 ? 0 : post_index + 1;
799 }
800 ioc->reply_post_host_index = post_index_next;
801 writel(post_index_next, &ioc->chip->ReplyPostHostIndex);
802 wmb(); 795 wmb();
796 writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
803 return IRQ_HANDLED; 797 return IRQ_HANDLED;
804} 798}
805 799
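Two things change in the interrupt path above: the loop keeps a cached descriptor pointer (rpf) and poisons each reply post descriptor with ULLONG_MAX as it is consumed, instead of sweeping the ring a second time after the loop; and the wmb() now precedes the writel() doorbell updates, so the poison and index stores are ordered before the hardware sees the new index. A compact userspace sketch of that circular-consume loop, with assumed names rather than the MPI2 structures (the real loop exits on the UNUSED ReplyFlags type; the sketch uses the poison value as a stand-in):

/* Hedged sketch of the ring walk; POISON mirrors the ULLONG_MAX marker. */
#include <stdint.h>

#define POISON UINT64_MAX

struct ring {
	uint64_t *desc;		/* reply post descriptors */
	unsigned int depth, head;
};

static unsigned int ring_consume(struct ring *r, void (*handle)(uint64_t))
{
	unsigned int done = 0;
	uint64_t *rpf = &r->desc[r->head];

	while (*rpf != POISON) {
		handle(*rpf);
		*rpf = POISON;	/* poison in place, as the driver now does */
		r->head = (r->head == r->depth - 1) ? 0 : r->head + 1;
		rpf = r->head ? rpf + 1 : r->desc;	/* wrap the pointer */
		done++;
	}
	/* the driver follows this with: wmb(); writel(head, doorbell); */
	return done;
}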
@@ -1542,6 +1536,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1542 (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8, 1536 (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
1543 ioc->bios_pg3.BiosVersion & 0x000000FF); 1537 ioc->bios_pg3.BiosVersion & 0x000000FF);
1544 1538
1539 _base_display_dell_branding(ioc);
1540
1545 printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); 1541 printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
1546 1542
1547 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { 1543 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
@@ -1554,8 +1550,6 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1554 i++; 1550 i++;
1555 } 1551 }
1556 1552
1557 _base_display_dell_branding(ioc);
1558
1559 i = 0; 1553 i = 0;
1560 printk("), "); 1554 printk("), ");
1561 printk("Capabilities=("); 1555 printk("Capabilities=(");
@@ -1627,6 +1621,9 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
1627 u32 iounit_pg1_flags; 1621 u32 iounit_pg1_flags;
1628 1622
1629 mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0); 1623 mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
1624 if (ioc->ir_firmware)
1625 mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
1626 &ioc->manu_pg10);
1630 mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); 1627 mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
1631 mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); 1628 mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
1632 mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); 1629 mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
@@ -1647,7 +1644,7 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
1647 iounit_pg1_flags |= 1644 iounit_pg1_flags |=
1648 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; 1645 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
1649 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); 1646 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
1650 mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, ioc->iounit_pg1); 1647 mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
1651} 1648}
1652 1649
1653/** 1650/**
@@ -3303,13 +3300,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3303 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3300 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3304 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 3301 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
3305 mutex_init(&ioc->tm_cmds.mutex); 3302 mutex_init(&ioc->tm_cmds.mutex);
3306 init_completion(&ioc->tm_cmds.done);
3307 3303
3308 /* config page internal command bits */ 3304 /* config page internal command bits */
3309 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3305 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3310 ioc->config_cmds.status = MPT2_CMD_NOT_USED; 3306 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
3311 mutex_init(&ioc->config_cmds.mutex); 3307 mutex_init(&ioc->config_cmds.mutex);
3312 init_completion(&ioc->config_cmds.done);
3313 3308
3314 /* ctl module internal command bits */ 3309 /* ctl module internal command bits */
3315 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3310 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
@@ -3433,6 +3428,7 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
3433 if (ioc->config_cmds.status & MPT2_CMD_PENDING) { 3428 if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
3434 ioc->config_cmds.status |= MPT2_CMD_RESET; 3429 ioc->config_cmds.status |= MPT2_CMD_RESET;
3435 mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid); 3430 mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
3431 ioc->config_cmds.smid = USHORT_MAX;
3436 complete(&ioc->config_cmds.done); 3432 complete(&ioc->config_cmds.done);
3437 } 3433 }
3438 break; 3434 break;
@@ -3501,20 +3497,13 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3501 __func__)); 3497 __func__));
3502 3498
3503 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 3499 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3504 if (ioc->ioc_reset_in_progress) { 3500 if (ioc->shost_recovery) {
3505 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 3501 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3506 printk(MPT2SAS_ERR_FMT "%s: busy\n", 3502 printk(MPT2SAS_ERR_FMT "%s: busy\n",
3507 ioc->name, __func__); 3503 ioc->name, __func__);
3508 return -EBUSY; 3504 return -EBUSY;
3509 } 3505 }
3510 ioc->ioc_reset_in_progress = 1;
3511 ioc->shost_recovery = 1; 3506 ioc->shost_recovery = 1;
3512 if (ioc->shost->shost_state == SHOST_RUNNING) {
3513 /* set back to SHOST_RUNNING in mpt2sas_scsih.c */
3514 scsi_host_set_state(ioc->shost, SHOST_RECOVERY);
3515 printk(MPT2SAS_INFO_FMT "putting controller into "
3516 "SHOST_RECOVERY\n", ioc->name);
3517 }
3518 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 3507 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3519 3508
3520 _base_reset_handler(ioc, MPT2_IOC_PRE_RESET); 3509 _base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
@@ -3534,7 +3523,10 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3534 ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"))); 3523 ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
3535 3524
3536 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 3525 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3537 ioc->ioc_reset_in_progress = 0; 3526 ioc->shost_recovery = 0;
3538 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 3527 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3528
3529 if (!r)
3530 _base_reset_handler(ioc, MPT2_IOC_RUNNING);
3539 return r; 3531 return r;
3540} 3532}
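The reset-path hunks collapse two overlapping flags (ioc_reset_in_progress and shost_recovery) into the single shost_recovery flag, always tested and set under ioc_reset_in_progress_lock, drop the manual SHOST_RECOVERY state flip, and add an MPT2_IOC_RUNNING callback phase that fires only after a successful reset. A sketch of the single-flag guard, with a pthread mutex standing in for the spinlock and assumed names:

/* Illustrative userspace model of the shost_recovery guard. */
#include <pthread.h>
#include <errno.h>

struct sketch_ioc {
	pthread_mutex_t lock;	/* stands in for ioc_reset_in_progress_lock */
	int shost_recovery;
};

static int sketch_hard_reset(struct sketch_ioc *ioc)
{
	int r;

	pthread_mutex_lock(&ioc->lock);
	if (ioc->shost_recovery) {	/* a reset is already running */
		pthread_mutex_unlock(&ioc->lock);
		return -EBUSY;
	}
	ioc->shost_recovery = 1;
	pthread_mutex_unlock(&ioc->lock);

	r = 0;	/* ... PRE_RESET, reset, AFTER_RESET phases go here ... */

	pthread_mutex_lock(&ioc->lock);
	ioc->shost_recovery = 0;
	pthread_mutex_unlock(&ioc->lock);
	/* on success the driver now also runs the RUNNING notification phase */
	return r;
}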
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index acdcff150a35..2faab1e690e9 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,10 +69,10 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "01.100.04.00" 72#define MPT2SAS_DRIVER_VERSION "01.100.06.00"
73#define MPT2SAS_MAJOR_VERSION 01 73#define MPT2SAS_MAJOR_VERSION 01
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 04 75#define MPT2SAS_BUILD_VERSION 06
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
77 77
78/* 78/*
@@ -119,6 +119,7 @@
119#define MPT2_IOC_PRE_RESET 1 /* prior to host reset */ 119#define MPT2_IOC_PRE_RESET 1 /* prior to host reset */
120#define MPT2_IOC_AFTER_RESET 2 /* just after host reset */ 120#define MPT2_IOC_AFTER_RESET 2 /* just after host reset */
121#define MPT2_IOC_DONE_RESET 3 /* links re-initialized */ 121#define MPT2_IOC_DONE_RESET 3 /* links re-initialized */
122#define MPT2_IOC_RUNNING 4 /* shost running */
122 123
123/* 124/*
124 * logging format 125 * logging format
@@ -196,6 +197,38 @@ struct MPT2SAS_TARGET {
196 * @block: device is in SDEV_BLOCK state 197 * @block: device is in SDEV_BLOCK state
197 * @tlr_snoop_check: flag used in determining whether to disable TLR 198 * @tlr_snoop_check: flag used in determining whether to disable TLR
198 */ 199 */
200
201/* OEM Identifiers */
202#define MFG10_OEM_ID_INVALID (0x00000000)
203#define MFG10_OEM_ID_DELL (0x00000001)
204#define MFG10_OEM_ID_FSC (0x00000002)
205#define MFG10_OEM_ID_SUN (0x00000003)
206#define MFG10_OEM_ID_IBM (0x00000004)
207
208/* GENERIC Flags 0*/
209#define MFG10_GF0_OCE_DISABLED (0x00000001)
210#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002)
211#define MFG10_GF0_R10_DISPLAY (0x00000004)
212#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
213#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
214
215/* OEM Specific Flags will come from OEM specific header files */
216typedef struct _MPI2_CONFIG_PAGE_MAN_10 {
217 MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
218 U8 OEMIdentifier; /* 04h */
219 U8 Reserved1; /* 05h */
220 U16 Reserved2; /* 08h */
221 U32 Reserved3; /* 0Ch */
222 U32 GenericFlags0; /* 10h */
223 U32 GenericFlags1; /* 14h */
224 U32 Reserved4; /* 18h */
225 U32 OEMSpecificFlags0; /* 1Ch */
226 U32 OEMSpecificFlags1; /* 20h */
227 U32 Reserved5[18]; /* 24h-60h*/
228} MPI2_CONFIG_PAGE_MAN_10,
229 MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10,
230 Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t;
231
199struct MPT2SAS_DEVICE { 232struct MPT2SAS_DEVICE {
200 struct MPT2SAS_TARGET *sas_target; 233 struct MPT2SAS_TARGET *sas_target;
201 unsigned int lun; 234 unsigned int lun;
@@ -431,7 +464,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
431 * @fw_event_list: list of fw events 464 * @fw_event_list: list of fw events
432 * @aen_event_read_flag: event log was read 465 * @aen_event_read_flag: event log was read
433 * @broadcast_aen_busy: broadcast aen waiting to be serviced 466 * @broadcast_aen_busy: broadcast aen waiting to be serviced
434 * @ioc_reset_in_progress: host reset in progress 467 * @shost_recovery: host reset in progress
435 * @ioc_reset_in_progress_lock: 468 * @ioc_reset_in_progress_lock:
436 * @ioc_link_reset_in_progress: phy/hard reset in progress 469 * @ioc_link_reset_in_progress: phy/hard reset in progress
 437 * @ignore_loginfos: ignore loginfos during task management 470
@@ -460,6 +493,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
460 * @facts: static facts data 493 * @facts: static facts data
461 * @pfacts: static port facts data 494 * @pfacts: static port facts data
462 * @manu_pg0: static manufacturing page 0 495 * @manu_pg0: static manufacturing page 0
496 * @manu_pg10: static manufacturing page 10
463 * @bios_pg2: static bios page 2 497 * @bios_pg2: static bios page 2
464 * @bios_pg3: static bios page 3 498 * @bios_pg3: static bios page 3
465 * @ioc_pg8: static ioc page 8 499 * @ioc_pg8: static ioc page 8
@@ -544,7 +578,6 @@ struct MPT2SAS_ADAPTER {
544 /* misc flags */ 578 /* misc flags */
545 int aen_event_read_flag; 579 int aen_event_read_flag;
546 u8 broadcast_aen_busy; 580 u8 broadcast_aen_busy;
547 u8 ioc_reset_in_progress;
548 u8 shost_recovery; 581 u8 shost_recovery;
549 spinlock_t ioc_reset_in_progress_lock; 582 spinlock_t ioc_reset_in_progress_lock;
550 u8 ioc_link_reset_in_progress; 583 u8 ioc_link_reset_in_progress;
@@ -663,6 +696,7 @@ struct MPT2SAS_ADAPTER {
663 dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT]; 696 dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
664 u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT]; 697 u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
665 u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT]; 698 u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
699 Mpi2ManufacturingPage10_t manu_pg10;
666 u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23]; 700 u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
667 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; 701 u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
668}; 702};
@@ -734,6 +768,8 @@ void mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 re
734int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys); 768int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys);
735int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, 769int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
736 Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page); 770 Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
771int mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
772 Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page);
737int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 773int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
738 *mpi_reply, Mpi2BiosPage2_t *config_page); 774 *mpi_reply, Mpi2BiosPage2_t *config_page);
739int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 775int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -749,7 +785,7 @@ int mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRep
749int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 785int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
750 *mpi_reply, Mpi2IOUnitPage1_t *config_page); 786 *mpi_reply, Mpi2IOUnitPage1_t *config_page);
751int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 787int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
752 *mpi_reply, Mpi2IOUnitPage1_t config_page); 788 *mpi_reply, Mpi2IOUnitPage1_t *config_page);
753int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 789int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
754 *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz); 790 *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
755int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 791int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -776,7 +812,6 @@ int mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
776 u16 *volume_handle); 812 u16 *volume_handle);
777int mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle, 813int mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
778 u64 *wwid); 814 u64 *wwid);
779
780/* ctl shared API */ 815/* ctl shared API */
781extern struct device_attribute *mpt2sas_host_attrs[]; 816extern struct device_attribute *mpt2sas_host_attrs[];
782extern struct device_attribute *mpt2sas_dev_attrs[]; 817extern struct device_attribute *mpt2sas_dev_attrs[];
@@ -798,9 +833,11 @@ int mpt2sas_transport_add_host_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
798 *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev); 833 *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
799int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy 834int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
800 *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev); 835 *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev);
801void mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, u16 handle, 836void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle,
802 u16 attached_handle, u8 phy_number, u8 link_rate); 837 u16 attached_handle, u8 phy_number, u8 link_rate);
803extern struct sas_function_template mpt2sas_transport_functions; 838extern struct sas_function_template mpt2sas_transport_functions;
804extern struct scsi_transport_template *mpt2sas_transport_template; 839extern struct scsi_transport_template *mpt2sas_transport_template;
840extern int scsi_internal_device_block(struct scsi_device *sdev);
841extern int scsi_internal_device_unblock(struct scsi_device *sdev);
805 842
806#endif /* MPT2SAS_BASE_H_INCLUDED */ 843#endif /* MPT2SAS_BASE_H_INCLUDED */
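The header additions above define a local Manufacturing Page 10 layout together with OEM identifiers and generic flag bits, and cache the page in ioc->manu_pg10 (read at init time when IR firmware is present, per the mpt2sas_base.c hunk). A hedged sketch of how a consumer such as the branding or RAID code might test those fields; the function itself is hypothetical, only the constants and the manu_pg10 member come from this diff:

/* Hypothetical consumer of the cached Manufacturing Page 10. */
static void sketch_check_oem(struct MPT2SAS_ADAPTER *ioc)
{
	switch (ioc->manu_pg10.OEMIdentifier) {	/* U8, no byte swap needed */
	case MFG10_OEM_ID_DELL:
		/* Dell-branded controller */
		break;
	case MFG10_OEM_ID_FSC:
	case MFG10_OEM_ID_SUN:
	case MFG10_OEM_ID_IBM:
		break;
	default:	/* MFG10_OEM_ID_INVALID */
		break;
	}
	if (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
	    MFG10_GF0_SINGLE_DRIVE_R0)
		printk("single-drive RAID0 configurations allowed\n");
}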
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 6ddee161beb3..ab8c560865d8 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -72,15 +72,15 @@
72 72
73/** 73/**
74 * struct config_request - obtain dma memory via routine 74 * struct config_request - obtain dma memory via routine
75 * @config_page_sz: size 75 * @sz: size
76 * @config_page: virt pointer 76 * @page: virt pointer
77 * @config_page_dma: phys pointer 77 * @page_dma: phys pointer
78 * 78 *
79 */ 79 */
80struct config_request{ 80struct config_request{
81 u16 config_page_sz; 81 u16 sz;
82 void *config_page; 82 void *page;
83 dma_addr_t config_page_dma; 83 dma_addr_t page_dma;
84}; 84};
85 85
86#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 86#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -175,6 +175,55 @@ _config_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
175#endif 175#endif
176 176
177/** 177/**
178 * _config_alloc_config_dma_memory - obtain physical memory
179 * @ioc: per adapter object
180 * @mem: struct config_request
181 *
182 * A wrapper for obtaining dma-able memory for config page request.
183 *
184 * Returns 0 for success, non-zero for failure.
185 */
186static int
187_config_alloc_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
188 struct config_request *mem)
189{
190 int r = 0;
191
192 if (mem->sz > ioc->config_page_sz) {
193 mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
194 &mem->page_dma, GFP_KERNEL);
195 if (!mem->page) {
196 printk(MPT2SAS_ERR_FMT "%s: dma_alloc_coherent"
197 " failed asking for (%d) bytes!!\n",
198 ioc->name, __func__, mem->sz);
199 r = -ENOMEM;
200 }
201 } else { /* use tmp buffer if less than 512 bytes */
202 mem->page = ioc->config_page;
203 mem->page_dma = ioc->config_page_dma;
204 }
205 return r;
206}
207
208/**
209 * _config_free_config_dma_memory - wrapper to free the memory
210 * @ioc: per adapter object
211 * @mem: struct config_request
212 *
213 * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
214 *
215 * Returns 0 for success, non-zero for failure.
216 */
217static void
218_config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
219 struct config_request *mem)
220{
221 if (mem->sz > ioc->config_page_sz)
222 dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
223 mem->page_dma);
224}
225
226/**
178 * mpt2sas_config_done - config page completion routine 227 * mpt2sas_config_done - config page completion routine
179 * @ioc: per adapter object 228 * @ioc: per adapter object
180 * @smid: system request message index 229 * @smid: system request message index
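These two helpers hoist the buffer-size decision that, before this patch, every accessor below repeated inline: requests that fit in ioc->config_page_sz reuse the adapter's preallocated coherent page, larger requests get a dedicated dma_alloc_coherent() buffer, and the free side only releases memory when the alloc side actually allocated some. A sketch of the intended pairing, with a hypothetical caller but the real types and helpers from this file:

/* Hypothetical caller mirroring how _config_request drives the helpers. */
static int sketch_with_dma_buffer(struct MPT2SAS_ADAPTER *ioc, u16 bytes)
{
	struct config_request mem;
	int r;

	memset(&mem, 0, sizeof(struct config_request));
	mem.sz = bytes;		/* derived from the page header reply */
	r = _config_alloc_config_dma_memory(ioc, &mem);
	if (r)
		return r;	/* -ENOMEM only on the big-buffer path */
	/* ... build the SGE from mem.page_dma, send, read mem.page ... */
	_config_free_config_dma_memory(ioc, &mem);	/* no-op for shared page */
	return 0;
}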
@@ -206,6 +255,7 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
206#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 255#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
207 _config_display_some_debug(ioc, smid, "config_done", mpi_reply); 256 _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
208#endif 257#endif
258 ioc->config_cmds.smid = USHORT_MAX;
209 complete(&ioc->config_cmds.done); 259 complete(&ioc->config_cmds.done);
210} 260}
211 261
@@ -215,7 +265,9 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
215 * @mpi_request: request message frame 265 * @mpi_request: request message frame
216 * @mpi_reply: reply mf payload returned from firmware 266 * @mpi_reply: reply mf payload returned from firmware
217 * @timeout: timeout in seconds 267 * @timeout: timeout in seconds
218 * Context: sleep, the calling function needs to acquire the config_cmds.mutex 268 * @config_page: contents of the config page
269 * @config_page_sz: size of config page
270 * Context: sleep
219 * 271 *
220 * A generic API for config page requests to firmware. 272 * A generic API for config page requests to firmware.
221 * 273 *
@@ -228,16 +280,17 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
228 */ 280 */
229static int 281static int
230_config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t 282_config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
231 *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout) 283 *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout,
284 void *config_page, u16 config_page_sz)
232{ 285{
233 u16 smid; 286 u16 smid;
234 u32 ioc_state; 287 u32 ioc_state;
235 unsigned long timeleft; 288 unsigned long timeleft;
236 Mpi2ConfigRequest_t *config_request; 289 Mpi2ConfigRequest_t *config_request;
237 int r; 290 int r;
238 u8 retry_count; 291 u8 retry_count, issue_host_reset = 0;
239 u8 issue_host_reset = 0;
240 u16 wait_state_count; 292 u16 wait_state_count;
293 struct config_request mem;
241 294
242 mutex_lock(&ioc->config_cmds.mutex); 295 mutex_lock(&ioc->config_cmds.mutex);
243 if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) { 296 if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
@@ -246,12 +299,44 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
246 mutex_unlock(&ioc->config_cmds.mutex); 299 mutex_unlock(&ioc->config_cmds.mutex);
247 return -EAGAIN; 300 return -EAGAIN;
248 } 301 }
302
249 retry_count = 0; 303 retry_count = 0;
304 memset(&mem, 0, sizeof(struct config_request));
305
306 if (config_page) {
307 mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
308 mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
309 mpi_request->Header.PageType = mpi_reply->Header.PageType;
310 mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
311 mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
312 mpi_request->ExtPageType = mpi_reply->ExtPageType;
313 if (mpi_request->Header.PageLength)
314 mem.sz = mpi_request->Header.PageLength * 4;
315 else
316 mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
317 r = _config_alloc_config_dma_memory(ioc, &mem);
318 if (r != 0)
319 goto out;
320 if (mpi_request->Action ==
321 MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT) {
322 ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
323 MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
324 mem.page_dma);
325 memcpy(mem.page, config_page, min_t(u16, mem.sz,
326 config_page_sz));
327 } else {
328 memset(config_page, 0, config_page_sz);
329 ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
330 MPT2_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma);
331 }
332 }
250 333
251 retry_config: 334 retry_config:
252 if (retry_count) { 335 if (retry_count) {
253 if (retry_count > 2) /* attempt only 2 retries */ 336 if (retry_count > 2) { /* attempt only 2 retries */
254 return -EFAULT; 337 r = -EFAULT;
338 goto free_mem;
339 }
255 printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n", 340 printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
256 ioc->name, __func__, retry_count); 341 ioc->name, __func__, retry_count);
257 } 342 }
@@ -262,8 +347,9 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
262 printk(MPT2SAS_ERR_FMT 347 printk(MPT2SAS_ERR_FMT
263 "%s: failed due to ioc not operational\n", 348 "%s: failed due to ioc not operational\n",
264 ioc->name, __func__); 349 ioc->name, __func__);
350 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
265 r = -EFAULT; 351 r = -EFAULT;
266 goto out; 352 goto free_mem;
267 } 353 }
268 ssleep(1); 354 ssleep(1);
269 ioc_state = mpt2sas_base_get_iocstate(ioc, 1); 355 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
@@ -279,8 +365,9 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
279 if (!smid) { 365 if (!smid) {
280 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 366 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
281 ioc->name, __func__); 367 ioc->name, __func__);
368 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
282 r = -EAGAIN; 369 r = -EAGAIN;
283 goto out; 370 goto free_mem;
284 } 371 }
285 372
286 r = 0; 373 r = 0;
@@ -292,6 +379,7 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
292#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 379#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
293 _config_display_some_debug(ioc, smid, "config_request", NULL); 380 _config_display_some_debug(ioc, smid, "config_request", NULL);
294#endif 381#endif
382 init_completion(&ioc->config_cmds.done);
295 mpt2sas_base_put_smid_default(ioc, smid, config_request->VF_ID); 383 mpt2sas_base_put_smid_default(ioc, smid, config_request->VF_ID);
296 timeleft = wait_for_completion_timeout(&ioc->config_cmds.done, 384 timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
297 timeout*HZ); 385 timeout*HZ);
@@ -303,22 +391,31 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
303 retry_count++; 391 retry_count++;
304 if (ioc->config_cmds.smid == smid) 392 if (ioc->config_cmds.smid == smid)
305 mpt2sas_base_free_smid(ioc, smid); 393 mpt2sas_base_free_smid(ioc, smid);
306 if ((ioc->shost_recovery) || 394 if ((ioc->shost_recovery) || (ioc->config_cmds.status &
307 (ioc->config_cmds.status & MPT2_CMD_RESET)) 395 MPT2_CMD_RESET))
308 goto retry_config; 396 goto retry_config;
309 issue_host_reset = 1; 397 issue_host_reset = 1;
310 r = -EFAULT; 398 r = -EFAULT;
311 goto out; 399 goto free_mem;
312 } 400 }
401
313 if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID) 402 if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
314 memcpy(mpi_reply, ioc->config_cmds.reply, 403 memcpy(mpi_reply, ioc->config_cmds.reply,
315 sizeof(Mpi2ConfigReply_t)); 404 sizeof(Mpi2ConfigReply_t));
316 if (retry_count) 405 if (retry_count)
317 printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n", 406 printk(MPT2SAS_INFO_FMT "%s: retry (%d) completed!!\n",
318 ioc->name, __func__); 407 ioc->name, __func__, retry_count);
319out: 408 if (config_page && mpi_request->Action ==
409 MPI2_CONFIG_ACTION_PAGE_READ_CURRENT)
410 memcpy(config_page, mem.page, min_t(u16, mem.sz,
411 config_page_sz));
412 free_mem:
413 if (config_page)
414 _config_free_config_dma_memory(ioc, &mem);
415 out:
320 ioc->config_cmds.status = MPT2_CMD_NOT_USED; 416 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
321 mutex_unlock(&ioc->config_cmds.mutex); 417 mutex_unlock(&ioc->config_cmds.mutex);
418
322 if (issue_host_reset) 419 if (issue_host_reset)
323 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 420 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
324 FORCE_BIG_HAMMER); 421 FORCE_BIG_HAMMER);
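With the DMA plumbing folded into _config_request, every accessor reduces to the same two-phase call: a PAGE_HEADER action with a NULL config_page to learn the page length, then a READ_CURRENT (or WRITE_CURRENT) action passing the caller's buffer and size, exactly as the rewritten mpt2sas_config_get_manufacturing_pg0 below does. The shape, as a sketch with a hypothetical wrapper but the real functions and constants from this file:

/* Sketch of the new two-phase calling convention. */
static int sketch_get_page(struct MPT2SAS_ADAPTER *ioc,
    Mpi2ConfigReply_t *mpi_reply, void *page, u16 page_sz)
{
	Mpi2ConfigRequest_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
	mpi_request.Function = MPI2_FUNCTION_CONFIG;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	/* ... PageType/PageNumber/PageVersion filled in per page ... */
	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
	r = _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);	/* header pass */
	if (r)
		return r;
	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	return _config_request(ioc, &mpi_request, mpi_reply,
	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, page, page_sz);	/* data pass */
}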
@@ -326,46 +423,43 @@ out:
326} 423}
327 424
328/** 425/**
329 * _config_alloc_config_dma_memory - obtain physical memory 426 * mpt2sas_config_get_manufacturing_pg0 - obtain manufacturing page 0
330 * @ioc: per adapter object 427 * @ioc: per adapter object
331 * @mem: struct config_request 428 * @mpi_reply: reply mf payload returned from firmware
332 * 429 * @config_page: contents of the config page
333 * A wrapper for obtaining dma-able memory for config page request. 430 * Context: sleep.
334 * 431 *
335 * Returns 0 for success, non-zero for failure. 432 * Returns 0 for success, non-zero for failure.
336 */ 433 */
337static int 434int
338_config_alloc_config_dma_memory(struct MPT2SAS_ADAPTER *ioc, 435mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
339 struct config_request *mem) 436 Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page)
340{ 437{
341 int r = 0; 438 Mpi2ConfigRequest_t mpi_request;
439 int r;
342 440
343 mem->config_page = pci_alloc_consistent(ioc->pdev, mem->config_page_sz, 441 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
344 &mem->config_page_dma); 442 mpi_request.Function = MPI2_FUNCTION_CONFIG;
345 if (!mem->config_page) 443 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
346 r = -ENOMEM; 444 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
347 return r; 445 mpi_request.Header.PageNumber = 0;
348} 446 mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
447 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
448 r = _config_request(ioc, &mpi_request, mpi_reply,
449 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
450 if (r)
451 goto out;
349 452
350/** 453 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
351 * _config_free_config_dma_memory - wrapper to free the memory 454 r = _config_request(ioc, &mpi_request, mpi_reply,
352 * @ioc: per adapter object 455 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
353 * @mem: struct config_request 456 sizeof(*config_page));
354 * 457 out:
355 * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory. 458 return r;
356 *
357 * Returns 0 for success, non-zero for failure.
358 */
359static void
360_config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
361 struct config_request *mem)
362{
363 pci_free_consistent(ioc->pdev, mem->config_page_sz, mem->config_page,
364 mem->config_page_dma);
365} 459}
366 460
367/** 461/**
368 * mpt2sas_config_get_manufacturing_pg0 - obtain manufacturing page 0 462 * mpt2sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
369 * @ioc: per adapter object 463 * @ioc: per adapter object
370 * @mpi_reply: reply mf payload returned from firmware 464 * @mpi_reply: reply mf payload returned from firmware
371 * @config_page: contents of the config page 465 * @config_page: contents of the config page
@@ -374,53 +468,28 @@ _config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
374 * Returns 0 for success, non-zero for failure. 468 * Returns 0 for success, non-zero for failure.
375 */ 469 */
376int 470int
377mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc, 471mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
378 Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page) 472 Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page)
379{ 473{
380 Mpi2ConfigRequest_t mpi_request; 474 Mpi2ConfigRequest_t mpi_request;
381 int r; 475 int r;
382 struct config_request mem;
383 476
384 memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t));
385 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 477 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
386 mpi_request.Function = MPI2_FUNCTION_CONFIG; 478 mpi_request.Function = MPI2_FUNCTION_CONFIG;
387 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 479 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
388 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; 480 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
389 mpi_request.Header.PageNumber = 0; 481 mpi_request.Header.PageNumber = 10;
390 mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; 482 mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
391 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 483 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
392 r = _config_request(ioc, &mpi_request, mpi_reply, 484 r = _config_request(ioc, &mpi_request, mpi_reply,
393 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 485 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
394 if (r) 486 if (r)
395 goto out; 487 goto out;
396 488
397 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 489 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
398 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
399 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
400 mpi_request.Header.PageType = mpi_reply->Header.PageType;
401 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
402 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
403 if (mem.config_page_sz > ioc->config_page_sz) {
404 r = _config_alloc_config_dma_memory(ioc, &mem);
405 if (r)
406 goto out;
407 } else {
408 mem.config_page_dma = ioc->config_page_dma;
409 mem.config_page = ioc->config_page;
410 }
411 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
412 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
413 mem.config_page_dma);
414 r = _config_request(ioc, &mpi_request, mpi_reply, 490 r = _config_request(ioc, &mpi_request, mpi_reply,
415 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 491 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
416 if (!r) 492 sizeof(*config_page));
417 memcpy(config_page, mem.config_page,
418 min_t(u16, mem.config_page_sz,
419 sizeof(Mpi2ManufacturingPage0_t)));
420
421 if (mem.config_page_sz > ioc->config_page_sz)
422 _config_free_config_dma_memory(ioc, &mem);
423
424 out: 493 out:
425 return r; 494 return r;
426} 495}
@@ -440,9 +509,7 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
440{ 509{
441 Mpi2ConfigRequest_t mpi_request; 510 Mpi2ConfigRequest_t mpi_request;
442 int r; 511 int r;
443 struct config_request mem;
444 512
445 memset(config_page, 0, sizeof(Mpi2BiosPage2_t));
446 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 513 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
447 mpi_request.Function = MPI2_FUNCTION_CONFIG; 514 mpi_request.Function = MPI2_FUNCTION_CONFIG;
448 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 515 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -451,37 +518,14 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
451 mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION; 518 mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION;
452 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 519 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
453 r = _config_request(ioc, &mpi_request, mpi_reply, 520 r = _config_request(ioc, &mpi_request, mpi_reply,
454 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 521 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
455 if (r) 522 if (r)
456 goto out; 523 goto out;
457 524
458 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 525 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
459 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
460 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
461 mpi_request.Header.PageType = mpi_reply->Header.PageType;
462 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
463 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
464 if (mem.config_page_sz > ioc->config_page_sz) {
465 r = _config_alloc_config_dma_memory(ioc, &mem);
466 if (r)
467 goto out;
468 } else {
469 mem.config_page_dma = ioc->config_page_dma;
470 mem.config_page = ioc->config_page;
471 }
472 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
473 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
474 mem.config_page_dma);
475 r = _config_request(ioc, &mpi_request, mpi_reply, 526 r = _config_request(ioc, &mpi_request, mpi_reply,
476 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 527 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
477 if (!r) 528 sizeof(*config_page));
478 memcpy(config_page, mem.config_page,
479 min_t(u16, mem.config_page_sz,
480 sizeof(Mpi2BiosPage2_t)));
481
482 if (mem.config_page_sz > ioc->config_page_sz)
483 _config_free_config_dma_memory(ioc, &mem);
484
485 out: 529 out:
486 return r; 530 return r;
487} 531}
@@ -501,9 +545,7 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
501{ 545{
502 Mpi2ConfigRequest_t mpi_request; 546 Mpi2ConfigRequest_t mpi_request;
503 int r; 547 int r;
504 struct config_request mem;
505 548
506 memset(config_page, 0, sizeof(Mpi2BiosPage3_t));
507 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 549 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
508 mpi_request.Function = MPI2_FUNCTION_CONFIG; 550 mpi_request.Function = MPI2_FUNCTION_CONFIG;
509 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 551 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -512,37 +554,14 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
512 mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION; 554 mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
513 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 555 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
514 r = _config_request(ioc, &mpi_request, mpi_reply, 556 r = _config_request(ioc, &mpi_request, mpi_reply,
515 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 557 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
516 if (r) 558 if (r)
517 goto out; 559 goto out;
518 560
519 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 561 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
520 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
521 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
522 mpi_request.Header.PageType = mpi_reply->Header.PageType;
523 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
524 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
525 if (mem.config_page_sz > ioc->config_page_sz) {
526 r = _config_alloc_config_dma_memory(ioc, &mem);
527 if (r)
528 goto out;
529 } else {
530 mem.config_page_dma = ioc->config_page_dma;
531 mem.config_page = ioc->config_page;
532 }
533 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
534 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
535 mem.config_page_dma);
536 r = _config_request(ioc, &mpi_request, mpi_reply, 562 r = _config_request(ioc, &mpi_request, mpi_reply,
537 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 563 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
538 if (!r) 564 sizeof(*config_page));
539 memcpy(config_page, mem.config_page,
540 min_t(u16, mem.config_page_sz,
541 sizeof(Mpi2BiosPage3_t)));
542
543 if (mem.config_page_sz > ioc->config_page_sz)
544 _config_free_config_dma_memory(ioc, &mem);
545
546 out: 565 out:
547 return r; 566 return r;
548} 567}
@@ -562,9 +581,7 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
562{ 581{
563 Mpi2ConfigRequest_t mpi_request; 582 Mpi2ConfigRequest_t mpi_request;
564 int r; 583 int r;
565 struct config_request mem;
566 584
567 memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t));
568 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 585 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
569 mpi_request.Function = MPI2_FUNCTION_CONFIG; 586 mpi_request.Function = MPI2_FUNCTION_CONFIG;
570 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 587 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -573,37 +590,14 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
573 mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION; 590 mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION;
574 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 591 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
575 r = _config_request(ioc, &mpi_request, mpi_reply, 592 r = _config_request(ioc, &mpi_request, mpi_reply,
576 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 593 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
577 if (r) 594 if (r)
578 goto out; 595 goto out;
579 596
580 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 597 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
581 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
582 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
583 mpi_request.Header.PageType = mpi_reply->Header.PageType;
584 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
585 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
586 if (mem.config_page_sz > ioc->config_page_sz) {
587 r = _config_alloc_config_dma_memory(ioc, &mem);
588 if (r)
589 goto out;
590 } else {
591 mem.config_page_dma = ioc->config_page_dma;
592 mem.config_page = ioc->config_page;
593 }
594 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
595 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
596 mem.config_page_dma);
597 r = _config_request(ioc, &mpi_request, mpi_reply, 598 r = _config_request(ioc, &mpi_request, mpi_reply,
598 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 599 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
599 if (!r) 600 sizeof(*config_page));
600 memcpy(config_page, mem.config_page,
601 min_t(u16, mem.config_page_sz,
602 sizeof(Mpi2IOUnitPage0_t)));
603
604 if (mem.config_page_sz > ioc->config_page_sz)
605 _config_free_config_dma_memory(ioc, &mem);
606
607 out: 601 out:
608 return r; 602 return r;
609} 603}
@@ -623,9 +617,7 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
623{ 617{
624 Mpi2ConfigRequest_t mpi_request; 618 Mpi2ConfigRequest_t mpi_request;
625 int r; 619 int r;
626 struct config_request mem;
627 620
628 memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t));
629 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 621 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
630 mpi_request.Function = MPI2_FUNCTION_CONFIG; 622 mpi_request.Function = MPI2_FUNCTION_CONFIG;
631 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 623 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -634,37 +626,14 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
634 mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; 626 mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
635 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 627 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
636 r = _config_request(ioc, &mpi_request, mpi_reply, 628 r = _config_request(ioc, &mpi_request, mpi_reply,
637 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 629 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
638 if (r) 630 if (r)
639 goto out; 631 goto out;
640 632
641 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 633 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
642 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
643 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
644 mpi_request.Header.PageType = mpi_reply->Header.PageType;
645 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
646 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
647 if (mem.config_page_sz > ioc->config_page_sz) {
648 r = _config_alloc_config_dma_memory(ioc, &mem);
649 if (r)
650 goto out;
651 } else {
652 mem.config_page_dma = ioc->config_page_dma;
653 mem.config_page = ioc->config_page;
654 }
655 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
656 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
657 mem.config_page_dma);
658 r = _config_request(ioc, &mpi_request, mpi_reply, 634 r = _config_request(ioc, &mpi_request, mpi_reply,
659 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 635 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
660 if (!r) 636 sizeof(*config_page));
661 memcpy(config_page, mem.config_page,
662 min_t(u16, mem.config_page_sz,
663 sizeof(Mpi2IOUnitPage1_t)));
664
665 if (mem.config_page_sz > ioc->config_page_sz)
666 _config_free_config_dma_memory(ioc, &mem);
667
668 out: 637 out:
669 return r; 638 return r;
670} 639}
@@ -680,11 +649,10 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
680 */ 649 */
681int 650int
682mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, 651mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
683 Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t config_page) 652 Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
684{ 653{
685 Mpi2ConfigRequest_t mpi_request; 654 Mpi2ConfigRequest_t mpi_request;
686 int r; 655 int r;
687 struct config_request mem;
688 656
689 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 657 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
690 mpi_request.Function = MPI2_FUNCTION_CONFIG; 658 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -694,38 +662,14 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
694 mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; 662 mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
695 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 663 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
696 r = _config_request(ioc, &mpi_request, mpi_reply, 664 r = _config_request(ioc, &mpi_request, mpi_reply,
697 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 665 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
698 if (r) 666 if (r)
699 goto out; 667 goto out;
700 668
701 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; 669 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
702 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
703 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
704 mpi_request.Header.PageType = mpi_reply->Header.PageType;
705 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
706 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
707 if (mem.config_page_sz > ioc->config_page_sz) {
708 r = _config_alloc_config_dma_memory(ioc, &mem);
709 if (r)
710 goto out;
711 } else {
712 mem.config_page_dma = ioc->config_page_dma;
713 mem.config_page = ioc->config_page;
714 }
715
716 memset(mem.config_page, 0, mem.config_page_sz);
717 memcpy(mem.config_page, &config_page,
718 sizeof(Mpi2IOUnitPage1_t));
719
720 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
721 MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.config_page_sz,
722 mem.config_page_dma);
723 r = _config_request(ioc, &mpi_request, mpi_reply, 670 r = _config_request(ioc, &mpi_request, mpi_reply,
724 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 671 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
725 672 sizeof(*config_page));
726 if (mem.config_page_sz > ioc->config_page_sz)
727 _config_free_config_dma_memory(ioc, &mem);
728
729 out: 673 out:
730 return r; 674 return r;
731} 675}
@@ -745,9 +689,7 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
745{ 689{
746 Mpi2ConfigRequest_t mpi_request; 690 Mpi2ConfigRequest_t mpi_request;
747 int r; 691 int r;
748 struct config_request mem;
749 692
750 memset(config_page, 0, sizeof(Mpi2IOCPage8_t));
751 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 693 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
752 mpi_request.Function = MPI2_FUNCTION_CONFIG; 694 mpi_request.Function = MPI2_FUNCTION_CONFIG;
753 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 695 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -756,37 +698,14 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
756 mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; 698 mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
757 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 699 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
758 r = _config_request(ioc, &mpi_request, mpi_reply, 700 r = _config_request(ioc, &mpi_request, mpi_reply,
759 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 701 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
760 if (r) 702 if (r)
761 goto out; 703 goto out;
762 704
763 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 705 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
764 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
765 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
766 mpi_request.Header.PageType = mpi_reply->Header.PageType;
767 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
768 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
769 if (mem.config_page_sz > ioc->config_page_sz) {
770 r = _config_alloc_config_dma_memory(ioc, &mem);
771 if (r)
772 goto out;
773 } else {
774 mem.config_page_dma = ioc->config_page_dma;
775 mem.config_page = ioc->config_page;
776 }
777 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
778 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
779 mem.config_page_dma);
780 r = _config_request(ioc, &mpi_request, mpi_reply, 706 r = _config_request(ioc, &mpi_request, mpi_reply,
781 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 707 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
782 if (!r) 708 sizeof(*config_page));
783 memcpy(config_page, mem.config_page,
784 min_t(u16, mem.config_page_sz,
785 sizeof(Mpi2IOCPage8_t)));
786
787 if (mem.config_page_sz > ioc->config_page_sz)
788 _config_free_config_dma_memory(ioc, &mem);
789
790 out: 709 out:
791 return r; 710 return r;
792} 711}
@@ -808,9 +727,7 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
808{ 727{
809 Mpi2ConfigRequest_t mpi_request; 728 Mpi2ConfigRequest_t mpi_request;
810 int r; 729 int r;
811 struct config_request mem;
812 730
813 memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t));
814 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 731 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
815 mpi_request.Function = MPI2_FUNCTION_CONFIG; 732 mpi_request.Function = MPI2_FUNCTION_CONFIG;
816 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 733 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -820,39 +737,15 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
820 mpi_request.Header.PageNumber = 0; 737 mpi_request.Header.PageNumber = 0;
821 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 738 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
822 r = _config_request(ioc, &mpi_request, mpi_reply, 739 r = _config_request(ioc, &mpi_request, mpi_reply,
823 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 740 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
824 if (r) 741 if (r)
825 goto out; 742 goto out;
826 743
827 mpi_request.PageAddress = cpu_to_le32(form | handle); 744 mpi_request.PageAddress = cpu_to_le32(form | handle);
828 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 745 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
829 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
830 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
831 mpi_request.Header.PageType = mpi_reply->Header.PageType;
832 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
833 mpi_request.ExtPageType = mpi_reply->ExtPageType;
834 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
835 if (mem.config_page_sz > ioc->config_page_sz) {
836 r = _config_alloc_config_dma_memory(ioc, &mem);
837 if (r)
838 goto out;
839 } else {
840 mem.config_page_dma = ioc->config_page_dma;
841 mem.config_page = ioc->config_page;
842 }
843 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
844 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
845 mem.config_page_dma);
846 r = _config_request(ioc, &mpi_request, mpi_reply, 746 r = _config_request(ioc, &mpi_request, mpi_reply,
847 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 747 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
848 if (!r) 748 sizeof(*config_page));
849 memcpy(config_page, mem.config_page,
850 min_t(u16, mem.config_page_sz,
851 sizeof(Mpi2SasDevicePage0_t)));
852
853 if (mem.config_page_sz > ioc->config_page_sz)
854 _config_free_config_dma_memory(ioc, &mem);
855
856 out: 749 out:
857 return r; 750 return r;
858} 751}
@@ -874,9 +767,7 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
874{ 767{
875 Mpi2ConfigRequest_t mpi_request; 768 Mpi2ConfigRequest_t mpi_request;
876 int r; 769 int r;
877 struct config_request mem;
878 770
879 memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t));
880 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 771 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
881 mpi_request.Function = MPI2_FUNCTION_CONFIG; 772 mpi_request.Function = MPI2_FUNCTION_CONFIG;
882 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 773 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -886,39 +777,15 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
886 mpi_request.Header.PageNumber = 1; 777 mpi_request.Header.PageNumber = 1;
887 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 778 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
888 r = _config_request(ioc, &mpi_request, mpi_reply, 779 r = _config_request(ioc, &mpi_request, mpi_reply,
889 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 780 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
890 if (r) 781 if (r)
891 goto out; 782 goto out;
892 783
893 mpi_request.PageAddress = cpu_to_le32(form | handle); 784 mpi_request.PageAddress = cpu_to_le32(form | handle);
894 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 785 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
895 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
896 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
897 mpi_request.Header.PageType = mpi_reply->Header.PageType;
898 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
899 mpi_request.ExtPageType = mpi_reply->ExtPageType;
900 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
901 if (mem.config_page_sz > ioc->config_page_sz) {
902 r = _config_alloc_config_dma_memory(ioc, &mem);
903 if (r)
904 goto out;
905 } else {
906 mem.config_page_dma = ioc->config_page_dma;
907 mem.config_page = ioc->config_page;
908 }
909 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
910 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
911 mem.config_page_dma);
912 r = _config_request(ioc, &mpi_request, mpi_reply, 786 r = _config_request(ioc, &mpi_request, mpi_reply,
913 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 787 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
914 if (!r) 788 sizeof(*config_page));
915 memcpy(config_page, mem.config_page,
916 min_t(u16, mem.config_page_sz,
917 sizeof(Mpi2SasDevicePage1_t)));
918
919 if (mem.config_page_sz > ioc->config_page_sz)
920 _config_free_config_dma_memory(ioc, &mem);
921
922 out: 789 out:
923 return r; 790 return r;
924} 791}
@@ -936,11 +803,11 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
936{ 803{
937 Mpi2ConfigRequest_t mpi_request; 804 Mpi2ConfigRequest_t mpi_request;
938 int r; 805 int r;
939 struct config_request mem;
940 u16 ioc_status; 806 u16 ioc_status;
941 Mpi2ConfigReply_t mpi_reply; 807 Mpi2ConfigReply_t mpi_reply;
942 Mpi2SasIOUnitPage0_t config_page; 808 Mpi2SasIOUnitPage0_t config_page;
943 809
810 *num_phys = 0;
944 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 811 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
945 mpi_request.Function = MPI2_FUNCTION_CONFIG; 812 mpi_request.Function = MPI2_FUNCTION_CONFIG;
946 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 813 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -950,44 +817,20 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
950 mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; 817 mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
951 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 818 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
952 r = _config_request(ioc, &mpi_request, &mpi_reply, 819 r = _config_request(ioc, &mpi_request, &mpi_reply,
953 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 820 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
954 if (r) 821 if (r)
955 goto out; 822 goto out;
956 823
957 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 824 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
958 mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion;
959 mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber;
960 mpi_request.Header.PageType = mpi_reply.Header.PageType;
961 mpi_request.ExtPageLength = mpi_reply.ExtPageLength;
962 mpi_request.ExtPageType = mpi_reply.ExtPageType;
963 mem.config_page_sz = le16_to_cpu(mpi_reply.ExtPageLength) * 4;
964 if (mem.config_page_sz > ioc->config_page_sz) {
965 r = _config_alloc_config_dma_memory(ioc, &mem);
966 if (r)
967 goto out;
968 } else {
969 mem.config_page_dma = ioc->config_page_dma;
970 mem.config_page = ioc->config_page;
971 }
972 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
973 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
974 mem.config_page_dma);
975 r = _config_request(ioc, &mpi_request, &mpi_reply, 825 r = _config_request(ioc, &mpi_request, &mpi_reply,
976 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 826 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
827 sizeof(Mpi2SasIOUnitPage0_t));
977 if (!r) { 828 if (!r) {
978 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 829 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
979 MPI2_IOCSTATUS_MASK; 830 MPI2_IOCSTATUS_MASK;
980 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 831 if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
981 memcpy(&config_page, mem.config_page,
982 min_t(u16, mem.config_page_sz,
983 sizeof(Mpi2SasIOUnitPage0_t)));
984 *num_phys = config_page.NumPhys; 832 *num_phys = config_page.NumPhys;
985 }
986 } 833 }
987
988 if (mem.config_page_sz > ioc->config_page_sz)
989 _config_free_config_dma_memory(ioc, &mem);
990
991 out: 834 out:
992 return r; 835 return r;
993} 836}
@@ -1011,8 +854,7 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1011{ 854{
1012 Mpi2ConfigRequest_t mpi_request; 855 Mpi2ConfigRequest_t mpi_request;
1013 int r; 856 int r;
1014 struct config_request mem; 857
1015 memset(config_page, 0, sz);
1016 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 858 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1017 mpi_request.Function = MPI2_FUNCTION_CONFIG; 859 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1018 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 860 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1022,37 +864,13 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1022 mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; 864 mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
1023 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 865 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1024 r = _config_request(ioc, &mpi_request, mpi_reply, 866 r = _config_request(ioc, &mpi_request, mpi_reply,
1025 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 867 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1026 if (r) 868 if (r)
1027 goto out; 869 goto out;
1028 870
1029 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 871 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1030 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1031 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1032 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1033 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1034 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1035 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1036 if (mem.config_page_sz > ioc->config_page_sz) {
1037 r = _config_alloc_config_dma_memory(ioc, &mem);
1038 if (r)
1039 goto out;
1040 } else {
1041 mem.config_page_dma = ioc->config_page_dma;
1042 mem.config_page = ioc->config_page;
1043 }
1044 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1045 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1046 mem.config_page_dma);
1047 r = _config_request(ioc, &mpi_request, mpi_reply, 872 r = _config_request(ioc, &mpi_request, mpi_reply,
1048 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 873 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
1049 if (!r)
1050 memcpy(config_page, mem.config_page,
1051 min_t(u16, sz, mem.config_page_sz));
1052
1053 if (mem.config_page_sz > ioc->config_page_sz)
1054 _config_free_config_dma_memory(ioc, &mem);
1055
1056 out: 874 out:
1057 return r; 875 return r;
1058} 876}
@@ -1076,9 +894,7 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1076{ 894{
1077 Mpi2ConfigRequest_t mpi_request; 895 Mpi2ConfigRequest_t mpi_request;
1078 int r; 896 int r;
1079 struct config_request mem;
1080 897
1081 memset(config_page, 0, sz);
1082 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 898 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1083 mpi_request.Function = MPI2_FUNCTION_CONFIG; 899 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1084 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 900 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1088,37 +904,13 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1088 mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; 904 mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
1089 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 905 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1090 r = _config_request(ioc, &mpi_request, mpi_reply, 906 r = _config_request(ioc, &mpi_request, mpi_reply,
1091 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 907 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1092 if (r) 908 if (r)
1093 goto out; 909 goto out;
1094 910
1095 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 911 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1096 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1097 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1098 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1099 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1100 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1101 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1102 if (mem.config_page_sz > ioc->config_page_sz) {
1103 r = _config_alloc_config_dma_memory(ioc, &mem);
1104 if (r)
1105 goto out;
1106 } else {
1107 mem.config_page_dma = ioc->config_page_dma;
1108 mem.config_page = ioc->config_page;
1109 }
1110 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1111 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1112 mem.config_page_dma);
1113 r = _config_request(ioc, &mpi_request, mpi_reply, 912 r = _config_request(ioc, &mpi_request, mpi_reply,
1114 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 913 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
1115 if (!r)
1116 memcpy(config_page, mem.config_page,
1117 min_t(u16, sz, mem.config_page_sz));
1118
1119 if (mem.config_page_sz > ioc->config_page_sz)
1120 _config_free_config_dma_memory(ioc, &mem);
1121
1122 out: 914 out:
1123 return r; 915 return r;
1124} 916}
@@ -1140,9 +932,7 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1140{ 932{
1141 Mpi2ConfigRequest_t mpi_request; 933 Mpi2ConfigRequest_t mpi_request;
1142 int r; 934 int r;
1143 struct config_request mem;
1144 935
1145 memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t));
1146 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 936 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1147 mpi_request.Function = MPI2_FUNCTION_CONFIG; 937 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1148 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 938 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1152,39 +942,15 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1152 mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION; 942 mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION;
1153 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 943 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1154 r = _config_request(ioc, &mpi_request, mpi_reply, 944 r = _config_request(ioc, &mpi_request, mpi_reply,
1155 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 945 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1156 if (r) 946 if (r)
1157 goto out; 947 goto out;
1158 948
1159 mpi_request.PageAddress = cpu_to_le32(form | handle); 949 mpi_request.PageAddress = cpu_to_le32(form | handle);
1160 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 950 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1161 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1162 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1163 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1164 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1165 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1166 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1167 if (mem.config_page_sz > ioc->config_page_sz) {
1168 r = _config_alloc_config_dma_memory(ioc, &mem);
1169 if (r)
1170 goto out;
1171 } else {
1172 mem.config_page_dma = ioc->config_page_dma;
1173 mem.config_page = ioc->config_page;
1174 }
1175 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1176 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1177 mem.config_page_dma);
1178 r = _config_request(ioc, &mpi_request, mpi_reply, 951 r = _config_request(ioc, &mpi_request, mpi_reply,
1179 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 952 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
1180 if (!r) 953 sizeof(*config_page));
1181 memcpy(config_page, mem.config_page,
1182 min_t(u16, mem.config_page_sz,
1183 sizeof(Mpi2ExpanderPage0_t)));
1184
1185 if (mem.config_page_sz > ioc->config_page_sz)
1186 _config_free_config_dma_memory(ioc, &mem);
1187
1188 out: 954 out:
1189 return r; 955 return r;
1190} 956}
@@ -1207,9 +973,7 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1207{ 973{
1208 Mpi2ConfigRequest_t mpi_request; 974 Mpi2ConfigRequest_t mpi_request;
1209 int r; 975 int r;
1210 struct config_request mem;
1211 976
1212 memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t));
1213 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 977 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1214 mpi_request.Function = MPI2_FUNCTION_CONFIG; 978 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1215 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 979 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1219,7 +983,7 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1219 mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION; 983 mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION;
1220 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 984 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1221 r = _config_request(ioc, &mpi_request, mpi_reply, 985 r = _config_request(ioc, &mpi_request, mpi_reply,
1222 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 986 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1223 if (r) 987 if (r)
1224 goto out; 988 goto out;
1225 989
@@ -1227,33 +991,9 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1227 cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM | 991 cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
1228 (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle); 992 (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle);
1229 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 993 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1230 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1231 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1232 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1233 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1234 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1235 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1236 if (mem.config_page_sz > ioc->config_page_sz) {
1237 r = _config_alloc_config_dma_memory(ioc, &mem);
1238 if (r)
1239 goto out;
1240 } else {
1241 mem.config_page_dma = ioc->config_page_dma;
1242 mem.config_page = ioc->config_page;
1243 }
1244 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1245 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1246 mem.config_page_dma);
1247 r = _config_request(ioc, &mpi_request, mpi_reply, 994 r = _config_request(ioc, &mpi_request, mpi_reply,
1248 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 995 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
1249 if (!r) 996 sizeof(*config_page));
1250 memcpy(config_page, mem.config_page,
1251 min_t(u16, mem.config_page_sz,
1252 sizeof(Mpi2ExpanderPage1_t)));
1253
1254 if (mem.config_page_sz > ioc->config_page_sz)
1255 _config_free_config_dma_memory(ioc, &mem);
1256
1257 out: 997 out:
1258 return r; 998 return r;
1259} 999}
@@ -1275,9 +1015,7 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1275{ 1015{
1276 Mpi2ConfigRequest_t mpi_request; 1016 Mpi2ConfigRequest_t mpi_request;
1277 int r; 1017 int r;
1278 struct config_request mem;
1279 1018
1280 memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t));
1281 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1019 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1282 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1020 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1283 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 1021 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1287,39 +1025,15 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1287 mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION; 1025 mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION;
1288 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 1026 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1289 r = _config_request(ioc, &mpi_request, mpi_reply, 1027 r = _config_request(ioc, &mpi_request, mpi_reply,
1290 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1028 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1291 if (r) 1029 if (r)
1292 goto out; 1030 goto out;
1293 1031
1294 mpi_request.PageAddress = cpu_to_le32(form | handle); 1032 mpi_request.PageAddress = cpu_to_le32(form | handle);
1295 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 1033 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1296 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1297 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1298 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1299 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1300 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1301 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1302 if (mem.config_page_sz > ioc->config_page_sz) {
1303 r = _config_alloc_config_dma_memory(ioc, &mem);
1304 if (r)
1305 goto out;
1306 } else {
1307 mem.config_page_dma = ioc->config_page_dma;
1308 mem.config_page = ioc->config_page;
1309 }
1310 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1311 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1312 mem.config_page_dma);
1313 r = _config_request(ioc, &mpi_request, mpi_reply, 1034 r = _config_request(ioc, &mpi_request, mpi_reply,
1314 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1035 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
1315 if (!r) 1036 sizeof(*config_page));
1316 memcpy(config_page, mem.config_page,
1317 min_t(u16, mem.config_page_sz,
1318 sizeof(Mpi2SasEnclosurePage0_t)));
1319
1320 if (mem.config_page_sz > ioc->config_page_sz)
1321 _config_free_config_dma_memory(ioc, &mem);
1322
1323 out: 1037 out:
1324 return r; 1038 return r;
1325} 1039}
@@ -1340,9 +1054,7 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1340{ 1054{
1341 Mpi2ConfigRequest_t mpi_request; 1055 Mpi2ConfigRequest_t mpi_request;
1342 int r; 1056 int r;
1343 struct config_request mem;
1344 1057
1345 memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t));
1346 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1058 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1347 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1059 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1348 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 1060 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1352,40 +1064,16 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1352 mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION; 1064 mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION;
1353 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 1065 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1354 r = _config_request(ioc, &mpi_request, mpi_reply, 1066 r = _config_request(ioc, &mpi_request, mpi_reply,
1355 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1067 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1356 if (r) 1068 if (r)
1357 goto out; 1069 goto out;
1358 1070
1359 mpi_request.PageAddress = 1071 mpi_request.PageAddress =
1360 cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); 1072 cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
1361 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 1073 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1362 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1363 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1364 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1365 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1366 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1367 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1368 if (mem.config_page_sz > ioc->config_page_sz) {
1369 r = _config_alloc_config_dma_memory(ioc, &mem);
1370 if (r)
1371 goto out;
1372 } else {
1373 mem.config_page_dma = ioc->config_page_dma;
1374 mem.config_page = ioc->config_page;
1375 }
1376 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1377 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1378 mem.config_page_dma);
1379 r = _config_request(ioc, &mpi_request, mpi_reply, 1074 r = _config_request(ioc, &mpi_request, mpi_reply,
1380 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1075 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
1381 if (!r) 1076 sizeof(*config_page));
1382 memcpy(config_page, mem.config_page,
1383 min_t(u16, mem.config_page_sz,
1384 sizeof(Mpi2SasPhyPage0_t)));
1385
1386 if (mem.config_page_sz > ioc->config_page_sz)
1387 _config_free_config_dma_memory(ioc, &mem);
1388
1389 out: 1077 out:
1390 return r; 1078 return r;
1391} 1079}
@@ -1406,9 +1094,7 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1406{ 1094{
1407 Mpi2ConfigRequest_t mpi_request; 1095 Mpi2ConfigRequest_t mpi_request;
1408 int r; 1096 int r;
1409 struct config_request mem;
1410 1097
1411 memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t));
1412 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1098 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1413 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1099 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1414 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 1100 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1418,40 +1104,16 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1418 mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION; 1104 mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION;
1419 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 1105 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1420 r = _config_request(ioc, &mpi_request, mpi_reply, 1106 r = _config_request(ioc, &mpi_request, mpi_reply,
1421 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1107 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1422 if (r) 1108 if (r)
1423 goto out; 1109 goto out;
1424 1110
1425 mpi_request.PageAddress = 1111 mpi_request.PageAddress =
1426 cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); 1112 cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
1427 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 1113 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1428 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1429 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1430 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1431 mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
1432 mpi_request.ExtPageType = mpi_reply->ExtPageType;
1433 mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
1434 if (mem.config_page_sz > ioc->config_page_sz) {
1435 r = _config_alloc_config_dma_memory(ioc, &mem);
1436 if (r)
1437 goto out;
1438 } else {
1439 mem.config_page_dma = ioc->config_page_dma;
1440 mem.config_page = ioc->config_page;
1441 }
1442 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1443 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1444 mem.config_page_dma);
1445 r = _config_request(ioc, &mpi_request, mpi_reply, 1114 r = _config_request(ioc, &mpi_request, mpi_reply,
1446 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1115 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
1447 if (!r) 1116 sizeof(*config_page));
1448 memcpy(config_page, mem.config_page,
1449 min_t(u16, mem.config_page_sz,
1450 sizeof(Mpi2SasPhyPage1_t)));
1451
1452 if (mem.config_page_sz > ioc->config_page_sz)
1453 _config_free_config_dma_memory(ioc, &mem);
1454
1455 out: 1117 out:
1456 return r; 1118 return r;
1457} 1119}
@@ -1474,9 +1136,7 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
1474{ 1136{
1475 Mpi2ConfigRequest_t mpi_request; 1137 Mpi2ConfigRequest_t mpi_request;
1476 int r; 1138 int r;
1477 struct config_request mem;
1478 1139
1479 memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t));
1480 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1140 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1481 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1141 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1482 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 1142 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1485,38 +1145,15 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
1485 mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION; 1145 mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
1486 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 1146 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1487 r = _config_request(ioc, &mpi_request, mpi_reply, 1147 r = _config_request(ioc, &mpi_request, mpi_reply,
1488 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1148 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1489 if (r) 1149 if (r)
1490 goto out; 1150 goto out;
1491 1151
1492 mpi_request.PageAddress = cpu_to_le32(form | handle); 1152 mpi_request.PageAddress = cpu_to_le32(form | handle);
1493 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 1153 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1494 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1495 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1496 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1497 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
1498 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
1499 if (mem.config_page_sz > ioc->config_page_sz) {
1500 r = _config_alloc_config_dma_memory(ioc, &mem);
1501 if (r)
1502 goto out;
1503 } else {
1504 mem.config_page_dma = ioc->config_page_dma;
1505 mem.config_page = ioc->config_page;
1506 }
1507 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1508 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1509 mem.config_page_dma);
1510 r = _config_request(ioc, &mpi_request, mpi_reply, 1154 r = _config_request(ioc, &mpi_request, mpi_reply,
1511 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1155 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
1512 if (!r) 1156 sizeof(*config_page));
1513 memcpy(config_page, mem.config_page,
1514 min_t(u16, mem.config_page_sz,
1515 sizeof(Mpi2RaidVolPage1_t)));
1516
1517 if (mem.config_page_sz > ioc->config_page_sz)
1518 _config_free_config_dma_memory(ioc, &mem);
1519
1520 out: 1157 out:
1521 return r; 1158 return r;
1522} 1159}
@@ -1535,10 +1172,9 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1535 u8 *num_pds) 1172 u8 *num_pds)
1536{ 1173{
1537 Mpi2ConfigRequest_t mpi_request; 1174 Mpi2ConfigRequest_t mpi_request;
1538 Mpi2RaidVolPage0_t *config_page; 1175 Mpi2RaidVolPage0_t config_page;
1539 Mpi2ConfigReply_t mpi_reply; 1176 Mpi2ConfigReply_t mpi_reply;
1540 int r; 1177 int r;
1541 struct config_request mem;
1542 u16 ioc_status; 1178 u16 ioc_status;
1543 1179
1544 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1180 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
@@ -1550,43 +1186,23 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1550 mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; 1186 mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
1551 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 1187 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1552 r = _config_request(ioc, &mpi_request, &mpi_reply, 1188 r = _config_request(ioc, &mpi_request, &mpi_reply,
1553 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1189 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1554 if (r) 1190 if (r)
1555 goto out; 1191 goto out;
1556 1192
1557 mpi_request.PageAddress = 1193 mpi_request.PageAddress =
1558 cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle); 1194 cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
1559 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 1195 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1560 mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion;
1561 mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber;
1562 mpi_request.Header.PageType = mpi_reply.Header.PageType;
1563 mpi_request.Header.PageLength = mpi_reply.Header.PageLength;
1564 mem.config_page_sz = le16_to_cpu(mpi_reply.Header.PageLength) * 4;
1565 if (mem.config_page_sz > ioc->config_page_sz) {
1566 r = _config_alloc_config_dma_memory(ioc, &mem);
1567 if (r)
1568 goto out;
1569 } else {
1570 mem.config_page_dma = ioc->config_page_dma;
1571 mem.config_page = ioc->config_page;
1572 }
1573 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1574 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1575 mem.config_page_dma);
1576 r = _config_request(ioc, &mpi_request, &mpi_reply, 1196 r = _config_request(ioc, &mpi_request, &mpi_reply,
1577 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1197 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
1198 sizeof(Mpi2RaidVolPage0_t));
1578 if (!r) { 1199 if (!r) {
1579 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 1200 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1580 MPI2_IOCSTATUS_MASK; 1201 MPI2_IOCSTATUS_MASK;
1581 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 1202 if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
1582 config_page = mem.config_page; 1203 *num_pds = config_page.NumPhysDisks;
1583 *num_pds = config_page->NumPhysDisks;
1584 }
1585 } 1204 }
1586 1205
1587 if (mem.config_page_sz > ioc->config_page_sz)
1588 _config_free_config_dma_memory(ioc, &mem);
1589
1590 out: 1206 out:
1591 return r; 1207 return r;
1592} 1208}
@@ -1610,10 +1226,8 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
1610{ 1226{
1611 Mpi2ConfigRequest_t mpi_request; 1227 Mpi2ConfigRequest_t mpi_request;
1612 int r; 1228 int r;
1613 struct config_request mem;
1614 1229
1615 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1230 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1616 memset(config_page, 0, sz);
1617 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1231 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1618 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 1232 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1619 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; 1233 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
@@ -1621,37 +1235,14 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
1621 mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; 1235 mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
1622 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 1236 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1623 r = _config_request(ioc, &mpi_request, mpi_reply, 1237 r = _config_request(ioc, &mpi_request, mpi_reply,
1624 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1238 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1625 if (r) 1239 if (r)
1626 goto out; 1240 goto out;
1627 1241
1628 mpi_request.PageAddress = cpu_to_le32(form | handle); 1242 mpi_request.PageAddress = cpu_to_le32(form | handle);
1629 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 1243 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1630 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1631 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1632 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1633 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
1634 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
1635 if (mem.config_page_sz > ioc->config_page_sz) {
1636 r = _config_alloc_config_dma_memory(ioc, &mem);
1637 if (r)
1638 goto out;
1639 } else {
1640 mem.config_page_dma = ioc->config_page_dma;
1641 mem.config_page = ioc->config_page;
1642 }
1643 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1644 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1645 mem.config_page_dma);
1646 r = _config_request(ioc, &mpi_request, mpi_reply, 1244 r = _config_request(ioc, &mpi_request, mpi_reply,
1647 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1245 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
1648 if (!r)
1649 memcpy(config_page, mem.config_page,
1650 min_t(u16, sz, mem.config_page_sz));
1651
1652 if (mem.config_page_sz > ioc->config_page_sz)
1653 _config_free_config_dma_memory(ioc, &mem);
1654
1655 out: 1246 out:
1656 return r; 1247 return r;
1657} 1248}
@@ -1674,10 +1265,8 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1674{ 1265{
1675 Mpi2ConfigRequest_t mpi_request; 1266 Mpi2ConfigRequest_t mpi_request;
1676 int r; 1267 int r;
1677 struct config_request mem;
1678 1268
1679 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1269 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1680 memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t));
1681 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1270 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1682 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 1271 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1683 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK; 1272 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
@@ -1685,38 +1274,15 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1685 mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION; 1274 mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
1686 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 1275 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1687 r = _config_request(ioc, &mpi_request, mpi_reply, 1276 r = _config_request(ioc, &mpi_request, mpi_reply,
1688 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1277 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1689 if (r) 1278 if (r)
1690 goto out; 1279 goto out;
1691 1280
1692 mpi_request.PageAddress = cpu_to_le32(form | form_specific); 1281 mpi_request.PageAddress = cpu_to_le32(form | form_specific);
1693 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 1282 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1694 mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
1695 mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
1696 mpi_request.Header.PageType = mpi_reply->Header.PageType;
1697 mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
1698 mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
1699 if (mem.config_page_sz > ioc->config_page_sz) {
1700 r = _config_alloc_config_dma_memory(ioc, &mem);
1701 if (r)
1702 goto out;
1703 } else {
1704 mem.config_page_dma = ioc->config_page_dma;
1705 mem.config_page = ioc->config_page;
1706 }
1707 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1708 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1709 mem.config_page_dma);
1710 r = _config_request(ioc, &mpi_request, mpi_reply, 1283 r = _config_request(ioc, &mpi_request, mpi_reply,
1711 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1284 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
1712 if (!r) 1285 sizeof(*config_page));
1713 memcpy(config_page, mem.config_page,
1714 min_t(u16, mem.config_page_sz,
1715 sizeof(Mpi2RaidPhysDiskPage0_t)));
1716
1717 if (mem.config_page_sz > ioc->config_page_sz)
1718 _config_free_config_dma_memory(ioc, &mem);
1719
1720 out: 1286 out:
1721 return r; 1287 return r;
1722} 1288}
@@ -1734,11 +1300,10 @@ int
1734mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle, 1300mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
1735 u16 *volume_handle) 1301 u16 *volume_handle)
1736{ 1302{
1737 Mpi2RaidConfigurationPage0_t *config_page; 1303 Mpi2RaidConfigurationPage0_t *config_page = NULL;
1738 Mpi2ConfigRequest_t mpi_request; 1304 Mpi2ConfigRequest_t mpi_request;
1739 Mpi2ConfigReply_t mpi_reply; 1305 Mpi2ConfigReply_t mpi_reply;
1740 int r, i; 1306 int r, i, config_page_sz;
1741 struct config_request mem;
1742 u16 ioc_status; 1307 u16 ioc_status;
1743 1308
1744 *volume_handle = 0; 1309 *volume_handle = 0;
@@ -1751,40 +1316,27 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
1751 mpi_request.Header.PageNumber = 0; 1316 mpi_request.Header.PageNumber = 0;
1752 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); 1317 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
1753 r = _config_request(ioc, &mpi_request, &mpi_reply, 1318 r = _config_request(ioc, &mpi_request, &mpi_reply,
1754 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1319 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1755 if (r) 1320 if (r)
1756 goto out; 1321 goto out;
1757 1322
1758 mpi_request.PageAddress = 1323 mpi_request.PageAddress =
1759 cpu_to_le32(MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG); 1324 cpu_to_le32(MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG);
1760 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; 1325 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1761 mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion; 1326 config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
1762 mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber; 1327 config_page = kmalloc(config_page_sz, GFP_KERNEL);
1763 mpi_request.Header.PageType = mpi_reply.Header.PageType; 1328 if (!config_page)
1764 mpi_request.ExtPageLength = mpi_reply.ExtPageLength; 1329 goto out;
1765 mpi_request.ExtPageType = mpi_reply.ExtPageType;
1766 mem.config_page_sz = le16_to_cpu(mpi_reply.ExtPageLength) * 4;
1767 if (mem.config_page_sz > ioc->config_page_sz) {
1768 r = _config_alloc_config_dma_memory(ioc, &mem);
1769 if (r)
1770 goto out;
1771 } else {
1772 mem.config_page_dma = ioc->config_page_dma;
1773 mem.config_page = ioc->config_page;
1774 }
1775 ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
1776 MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
1777 mem.config_page_dma);
1778 r = _config_request(ioc, &mpi_request, &mpi_reply, 1330 r = _config_request(ioc, &mpi_request, &mpi_reply,
1779 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT); 1331 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
1332 config_page_sz);
1780 if (r) 1333 if (r)
1781 goto out; 1334 goto out;
1782 1335
1783 r = -1; 1336 r = -1;
1784 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 1337 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
1785 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 1338 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
1786 goto done; 1339 goto out;
1787 config_page = mem.config_page;
1788 for (i = 0; i < config_page->NumElements; i++) { 1340 for (i = 0; i < config_page->NumElements; i++) {
1789 if ((config_page->ConfigElement[i].ElementFlags & 1341 if ((config_page->ConfigElement[i].ElementFlags &
1790 MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) != 1342 MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) !=
@@ -1795,15 +1347,11 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
1795 *volume_handle = le16_to_cpu(config_page-> 1347 *volume_handle = le16_to_cpu(config_page->
1796 ConfigElement[i].VolDevHandle); 1348 ConfigElement[i].VolDevHandle);
1797 r = 0; 1349 r = 0;
1798 goto done; 1350 goto out;
1799 } 1351 }
1800 } 1352 }
1801
1802 done:
1803 if (mem.config_page_sz > ioc->config_page_sz)
1804 _config_free_config_dma_memory(ioc, &mem);
1805
1806 out: 1353 out:
1354 kfree(config_page);
1807 return r; 1355 return r;
1808} 1356}
1809 1357
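All of the mpt2sas_config.c hunks above are one mechanical refactor: the per-getter DMA plumbing (struct config_request mem, _config_alloc_config_dma_memory()/_config_free_config_dma_memory(), ioc->base_add_sg_single() and the size-clamped memcpy) moves behind two new _config_request() parameters, a caller buffer and its length. Every mpt2sas_config_get_* reader is left with the same two-pass shape: a PAGE_HEADER request with (NULL, 0) to learn the page layout, then a READ_CURRENT request into its own page. The reworked _config_request() body is not part of these hunks, so the sketch below only captures the contract the call sites now rely on; the function and constant names come from the diff, while example_get_phy_pg0 itself is illustrative:

    /* Sketch: the two-pass read every getter above now performs. */
    static int
    example_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply,
        Mpi2SasPhyPage0_t *config_page, u32 phy_number)
    {
        Mpi2ConfigRequest_t mpi_request;
        int r;

        memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
        mpi_request.Function = MPI2_FUNCTION_CONFIG;
        mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
        /* ... page type/number/version set as in the hunks above ... */
        mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);

        /* Pass 1: header only; no payload buffer needed. */
        r = _config_request(ioc, &mpi_request, mpi_reply,
            MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
        if (r)
            return r;

        /* Pass 2: _config_request() now owns the DMA buffer internally
         * and, on success, copies back at most config_page_sz bytes. */
        mpi_request.PageAddress =
            cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
        mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
        return _config_request(ioc, &mpi_request, mpi_reply,
            MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
            sizeof(*config_page));
    }

One caller is no longer fixed-size: mpt2sas_config_get_volume_handle() reads a variable-length RAID configuration page, so it kmallocs a buffer of ExtPageLength * 4 bytes for the second pass and kfrees it at the shared out label. Note that its new "if (!config_page) goto out" path returns whatever r held after the successful header read rather than an explicit -ENOMEM.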
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 14e473d1fa7b..c2a51018910f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -1963,7 +1963,6 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
1963{ 1963{
1964 enum block_state state; 1964 enum block_state state;
1965 long ret = -EINVAL; 1965 long ret = -EINVAL;
1966 unsigned long flags;
1967 1966
1968 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : 1967 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING :
1969 BLOCKING; 1968 BLOCKING;
@@ -1989,13 +1988,8 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
1989 !ioc) 1988 !ioc)
1990 return -ENODEV; 1989 return -ENODEV;
1991 1990
1992 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 1991 if (ioc->shost_recovery)
1993 if (ioc->shost_recovery) {
1994 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
1995 flags);
1996 return -EAGAIN; 1992 return -EAGAIN;
1997 }
1998 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1999 1993
2000 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) { 1994 if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
2001 uarg = arg; 1995 uarg = arg;
@@ -2098,7 +2092,6 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
2098 struct mpt2_ioctl_command karg; 2092 struct mpt2_ioctl_command karg;
2099 struct MPT2SAS_ADAPTER *ioc; 2093 struct MPT2SAS_ADAPTER *ioc;
2100 enum block_state state; 2094 enum block_state state;
2101 unsigned long flags;
2102 2095
2103 if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32)) 2096 if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
2104 return -EINVAL; 2097 return -EINVAL;
@@ -2113,13 +2106,8 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
2113 if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc) 2106 if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
2114 return -ENODEV; 2107 return -ENODEV;
2115 2108
2116 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 2109 if (ioc->shost_recovery)
2117 if (ioc->shost_recovery) {
2118 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
2119 flags);
2120 return -EAGAIN; 2110 return -EAGAIN;
2121 }
2122 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
2123 2111
2124 memset(&karg, 0, sizeof(struct mpt2_ioctl_command)); 2112 memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
2125 karg.hdr.ioc_number = karg32.hdr.ioc_number; 2113 karg.hdr.ioc_number = karg32.hdr.ioc_number;
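Both ioctl entry points (_ctl_ioctl_main() and _ctl_compat_mpt_command()) stop taking ioc_reset_in_progress_lock around the ioc->shost_recovery test. The flag is read once and acted on immediately, so the lock never gave a stable answer: the value could change the moment the lock was dropped, and userspace retries on -EAGAIN anyway. The simplification, both forms verbatim from the hunks above:

    /* Before: lock, test, unlock -- the answer was stale on return anyway. */
    spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
    if (ioc->shost_recovery) {
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
        return -EAGAIN;
    }
    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

    /* After: a plain flag read; losing a race with reset entry is benign
     * because -EAGAIN tells the caller to retry. */
    if (ioc->shost_recovery)
        return -EAGAIN;

This also lets both functions drop their now-unused "unsigned long flags" locals.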
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 2e9a4445596f..774b34525bba 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -103,7 +103,6 @@ struct sense_info {
103}; 103};
104 104
105 105
106#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
107/** 106/**
108 * struct fw_event_work - firmware event struct 107 * struct fw_event_work - firmware event struct
109 * @list: link list framework 108 * @list: link list framework
@@ -1502,7 +1501,13 @@ _scsih_slave_configure(struct scsi_device *sdev)
1502 break; 1501 break;
1503 case MPI2_RAID_VOL_TYPE_RAID1E: 1502 case MPI2_RAID_VOL_TYPE_RAID1E:
1504 qdepth = MPT2SAS_RAID_QUEUE_DEPTH; 1503 qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
1505 r_level = "RAID1E"; 1504 if (ioc->manu_pg10.OEMIdentifier &&
1505 (ioc->manu_pg10.GenericFlags0 &
1506 MFG10_GF0_R10_DISPLAY) &&
1507 !(raid_device->num_pds % 2))
1508 r_level = "RAID10";
1509 else
1510 r_level = "RAID1E";
1506 break; 1511 break;
1507 case MPI2_RAID_VOL_TYPE_RAID1: 1512 case MPI2_RAID_VOL_TYPE_RAID1:
1508 qdepth = MPT2SAS_RAID_QUEUE_DEPTH; 1513 qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
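In _scsih_slave_configure(), the RAID1E branch can now report the volume as "RAID10": a RAID1E layout over an even number of physical disks is structurally equivalent to RAID10, and an OEM-specific flag in manufacturing page 10 (MFG10_GF0_R10_DISPLAY) opts into that labeling. The decision, isolated from the hunk above:

    /* RAID level string for a MPI2_RAID_VOL_TYPE_RAID1E volume. */
    if (ioc->manu_pg10.OEMIdentifier &&
        (ioc->manu_pg10.GenericFlags0 & MFG10_GF0_R10_DISPLAY) &&
        !(raid_device->num_pds % 2))
        r_level = "RAID10";     /* even PD count: RAID1E == RAID10 layout */
    else
        r_level = "RAID1E";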
@@ -1786,17 +1791,18 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1786 u32 ioc_state; 1791 u32 ioc_state;
1787 unsigned long timeleft; 1792 unsigned long timeleft;
1788 u8 VF_ID = 0; 1793 u8 VF_ID = 0;
1789 unsigned long flags;
1790 1794
1791 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 1795 if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
1792 if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED || 1796 printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
1793 ioc->shost_recovery) { 1797 __func__, ioc->name);
1794 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 1798 return;
1799 }
1800
1801 if (ioc->shost_recovery) {
1795 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", 1802 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
1796 __func__, ioc->name); 1803 __func__, ioc->name);
1797 return; 1804 return;
1798 } 1805 }
1799 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1800 1806
1801 ioc_state = mpt2sas_base_get_iocstate(ioc, 0); 1807 ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
1802 if (ioc_state & MPI2_DOORBELL_USED) { 1808 if (ioc_state & MPI2_DOORBELL_USED) {
@@ -1830,6 +1836,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1830 mpi_request->TaskMID = cpu_to_le16(smid_task); 1836 mpi_request->TaskMID = cpu_to_le16(smid_task);
1831 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); 1837 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
1832 mpt2sas_scsih_set_tm_flag(ioc, handle); 1838 mpt2sas_scsih_set_tm_flag(ioc, handle);
1839 init_completion(&ioc->tm_cmds.done);
1833 mpt2sas_base_put_smid_hi_priority(ioc, smid, VF_ID); 1840 mpt2sas_base_put_smid_hi_priority(ioc, smid, VF_ID);
1834 timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); 1841 timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
1835 mpt2sas_scsih_clear_tm_flag(ioc, handle); 1842 mpt2sas_scsih_clear_tm_flag(ioc, handle);
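mpt2sas_scsih_issue_tm() changes in two ways. First, the busy and recovery checks lose the reset lock and get separate diagnostics (a busy tm_cmds slot versus a host reset in progress). Second, init_completion(&ioc->tm_cmds.done) now runs on every call, immediately before the high-priority frame is posted. That closes a likely stale-completion window: if an earlier task-management request was completed out of band (for example by the reset path) after its caller had already timed out, the leftover done count would let the next wait_for_completion_timeout() return instantly. The ordering as it stands after this patch:

    mpi_request->TaskMID = cpu_to_le16(smid_task);
    int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
    mpt2sas_scsih_set_tm_flag(ioc, handle);
    init_completion(&ioc->tm_cmds.done);    /* discard any stale count */
    mpt2sas_base_put_smid_hi_priority(ioc, smid, VF_ID);
    timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
    mpt2sas_scsih_clear_tm_flag(ioc, handle);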
@@ -2222,7 +2229,7 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2222 MPT2SAS_INFO_FMT "SDEV_RUNNING: " 2229 MPT2SAS_INFO_FMT "SDEV_RUNNING: "
2223 "handle(0x%04x)\n", ioc->name, handle)); 2230 "handle(0x%04x)\n", ioc->name, handle));
2224 sas_device_priv_data->block = 0; 2231 sas_device_priv_data->block = 0;
2225 scsi_device_set_state(sdev, SDEV_RUNNING); 2232 scsi_internal_device_unblock(sdev);
2226 } 2233 }
2227 } 2234 }
2228} 2235}
@@ -2251,7 +2258,7 @@ _scsih_block_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2251 MPT2SAS_INFO_FMT "SDEV_BLOCK: " 2258 MPT2SAS_INFO_FMT "SDEV_BLOCK: "
2252 "handle(0x%04x)\n", ioc->name, handle)); 2259 "handle(0x%04x)\n", ioc->name, handle));
2253 sas_device_priv_data->block = 1; 2260 sas_device_priv_data->block = 1;
2254 scsi_device_set_state(sdev, SDEV_BLOCK); 2261 scsi_internal_device_block(sdev);
2255 } 2262 }
2256 } 2263 }
2257} 2264}
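_scsih_ublock_io_device() and _scsih_block_io_device() switch from bare scsi_device_set_state() to the midlayer helpers scsi_internal_device_unblock()/scsi_internal_device_block(). The difference is that the helpers manage the request queue as well as the sdev state, so I/O to a device in DELAY_NOT_RESPONDING is genuinely held and later restarted, instead of the state flag flipping while the queue keeps running. In outline:

    /* Device reported delayed/not responding: quiesce it properly. */
    sas_device_priv_data->block = 1;
    scsi_internal_device_block(sdev);    /* SDEV_BLOCK + stop request queue */

    /* Device reappeared at a usable link rate: resume it. */
    sas_device_priv_data->block = 0;
    scsi_internal_device_unblock(sdev);  /* restore state + restart queue */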
@@ -2327,6 +2334,7 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
2327 u16 handle; 2334 u16 handle;
2328 u16 reason_code; 2335 u16 reason_code;
2329 u8 phy_number; 2336 u8 phy_number;
2337 u8 link_rate;
2330 2338
2331 for (i = 0; i < event_data->NumEntries; i++) { 2339 for (i = 0; i < event_data->NumEntries; i++) {
2332 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 2340 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
@@ -2337,6 +2345,11 @@ _scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
2337 MPI2_EVENT_SAS_TOPO_RC_MASK; 2345 MPI2_EVENT_SAS_TOPO_RC_MASK;
2338 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) 2346 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
2339 _scsih_block_io_device(ioc, handle); 2347 _scsih_block_io_device(ioc, handle);
2348 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
2349 link_rate = event_data->PHY[i].LinkRate >> 4;
2350 if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
2351 _scsih_ublock_io_device(ioc, handle);
2352 }
2340 } 2353 }
2341} 2354}
2342 2355
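_scsih_block_io_to_children_attached_directly() now also reacts to MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: if the phy's renegotiated rate is at least 1.5 Gb/s, the attached device is unblocked on the spot. This replaces the equivalent unblock that a later hunk deletes from _scsih_sas_topology_change_event(), moving the resume next to the block it undoes. The rate lives in the upper nibble of the event's LinkRate byte:

    /* LinkRate bits 7:4 carry the current negotiated rate. */
    link_rate = event_data->PHY[i].LinkRate >> 4;
    if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED &&
        link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
        _scsih_ublock_io_device(ioc, handle);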
@@ -2405,27 +2418,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
2405} 2418}
2406 2419
2407/** 2420/**
2408 * _scsih_queue_rescan - queue a topology rescan from user context
2409 * @ioc: per adapter object
2410 *
2411 * Return nothing.
2412 */
2413static void
2414_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
2415{
2416 struct fw_event_work *fw_event;
2417
2418 if (ioc->wait_for_port_enable_to_complete)
2419 return;
2420 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
2421 if (!fw_event)
2422 return;
2423 fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
2424 fw_event->ioc = ioc;
2425 _scsih_fw_event_add(ioc, fw_event);
2426}
2427
2428/**
2429 * _scsih_flush_running_cmds - completing outstanding commands. 2421 * _scsih_flush_running_cmds - completing outstanding commands.
2430 * @ioc: per adapter object 2422 * @ioc: per adapter object
2431 * 2423 *
@@ -2456,46 +2448,6 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
2456} 2448}
2457 2449
2458/** 2450/**
2459 * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
2460 * @ioc: per adapter object
2461 * @reset_phase: phase
2462 *
2463 * The handler for doing any required cleanup or initialization.
2464 *
2465 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
2466 * MPT2_IOC_DONE_RESET
2467 *
2468 * Return nothing.
2469 */
2470void
2471mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2472{
2473 switch (reset_phase) {
2474 case MPT2_IOC_PRE_RESET:
2475 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
2476 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
2477 _scsih_fw_event_off(ioc);
2478 break;
2479 case MPT2_IOC_AFTER_RESET:
2480 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
2481 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
2482 if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
2483 ioc->tm_cmds.status |= MPT2_CMD_RESET;
2484 mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
2485 complete(&ioc->tm_cmds.done);
2486 }
2487 _scsih_fw_event_on(ioc);
2488 _scsih_flush_running_cmds(ioc);
2489 break;
2490 case MPT2_IOC_DONE_RESET:
2491 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
2492 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
2493 _scsih_queue_rescan(ioc);
2494 break;
2495 }
2496}
2497
2498/**
2499 * _scsih_setup_eedp - setup MPI request for EEDP transfer 2451 * _scsih_setup_eedp - setup MPI request for EEDP transfer
2500 * @scmd: pointer to scsi command object 2452 * @scmd: pointer to scsi command object
2501 * @mpi_request: pointer to the SCSI_IO request message frame 2453 * @mpi_request: pointer to the SCSI_IO request message frame
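The driver-side reset machinery is removed wholesale above: the MPT2SAS_RESCAN_AFTER_HOST_RESET pseudo-event, _scsih_queue_rescan(), and the mpt2sas_scsih_reset_handler() PRE_RESET/AFTER_RESET/DONE_RESET callback all disappear. In their place, this patch adds direct ioc->shost_recovery guards at each point that must not touch device state mid-reset (expander add and remove, topology events, device removal below); how a post-reset rescan is triggered under the new scheme is not visible in these hunks. The recurring guard is deliberately minimal:

    /* Pattern added throughout scsih in place of the reset callback: */
    if (ioc->shost_recovery)
        return;    /* variants: return -1; or goto out; per call site */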
@@ -2615,7 +2567,6 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2615 Mpi2SCSIIORequest_t *mpi_request; 2567 Mpi2SCSIIORequest_t *mpi_request;
2616 u32 mpi_control; 2568 u32 mpi_control;
2617 u16 smid; 2569 u16 smid;
2618 unsigned long flags;
2619 2570
2620 scmd->scsi_done = done; 2571 scmd->scsi_done = done;
2621 sas_device_priv_data = scmd->device->hostdata; 2572 sas_device_priv_data = scmd->device->hostdata;
@@ -2634,13 +2585,10 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2634 } 2585 }
2635 2586
2636 /* see if we are busy with task management stuff */ 2587 /* see if we are busy with task management stuff */
2637 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 2588 if (sas_target_priv_data->tm_busy)
2638 if (sas_target_priv_data->tm_busy || 2589 return SCSI_MLQUEUE_DEVICE_BUSY;
2639 ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { 2590 else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
2640 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
2641 return SCSI_MLQUEUE_HOST_BUSY; 2591 return SCSI_MLQUEUE_HOST_BUSY;
2642 }
2643 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
2644 2592
2645 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 2593 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2646 mpi_control = MPI2_SCSIIO_CONTROL_READ; 2594 mpi_control = MPI2_SCSIIO_CONTROL_READ;
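_scsih_qcmd() drops the reset lock like the ioctl paths and, more usefully, splits the requeue granularity to match the scope of the stall: a target that is mid-task-management only requeues its own commands, while host reset or link reset requeues host-wide.

    /* tm_busy is per-target; recovery and link reset are host-wide. */
    if (sas_target_priv_data->tm_busy)
        return SCSI_MLQUEUE_DEVICE_BUSY;
    else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
        return SCSI_MLQUEUE_HOST_BUSY;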
@@ -3189,25 +3137,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
3189} 3137}
3190 3138
3191/** 3139/**
3192 * _scsih_link_change - process phy link changes
3193 * @ioc: per adapter object
3194 * @handle: phy handle
3195 * @attached_handle: valid for devices attached to link
3196 * @phy_number: phy number
3197 * @link_rate: new link rate
3198 * Context: user.
3199 *
3200 * Return nothing.
3201 */
3202static void
3203_scsih_link_change(struct MPT2SAS_ADAPTER *ioc, u16 handle, u16 attached_handle,
3204 u8 phy_number, u8 link_rate)
3205{
3206 mpt2sas_transport_update_phy_link_change(ioc, handle, attached_handle,
3207 phy_number, link_rate);
3208}
3209
3210/**
3211 * _scsih_sas_host_refresh - refreshing sas host object contents 3140 * _scsih_sas_host_refresh - refreshing sas host object contents
3212 * @ioc: per adapter object 3141 * @ioc: per adapter object
3213 * @update: update link information 3142 * @update: update link information
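_scsih_link_change() was a pure pass-through to mpt2sas_transport_update_links() and is deleted; every call site shown in this diff (host refresh, the HBA and expander branches of the topology-change handler, SAS PD add, and the IR physical-disk event) now invokes the transport helper directly with the same arguments. Shape of the call, with placeholder variable names:

    /* Former wrapper body, now written out at each call site: */
    mpt2sas_transport_update_links(ioc, phy_handle, attached_handle,
        phy_number, link_rate);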
@@ -3251,7 +3180,8 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc, u8 update)
3251 le16_to_cpu(sas_iounit_pg0->PhyData[i]. 3180 le16_to_cpu(sas_iounit_pg0->PhyData[i].
3252 ControllerDevHandle); 3181 ControllerDevHandle);
3253 if (update) 3182 if (update)
3254 _scsih_link_change(ioc, 3183 mpt2sas_transport_update_links(
3184 ioc,
3255 ioc->sas_hba.phy[i].handle, 3185 ioc->sas_hba.phy[i].handle,
3256 le16_to_cpu(sas_iounit_pg0->PhyData[i]. 3186 le16_to_cpu(sas_iounit_pg0->PhyData[i].
3257 AttachedDevHandle), i, 3187 AttachedDevHandle), i,
@@ -3436,6 +3366,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3436 if (!handle) 3366 if (!handle)
3437 return -1; 3367 return -1;
3438 3368
3369 if (ioc->shost_recovery)
3370 return -1;
3371
3439 if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, 3372 if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
3440 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { 3373 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
3441 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 3374 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
@@ -3572,6 +3505,9 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3572 struct _sas_node *sas_expander; 3505 struct _sas_node *sas_expander;
3573 unsigned long flags; 3506 unsigned long flags;
3574 3507
3508 if (ioc->shost_recovery)
3509 return;
3510
3575 spin_lock_irqsave(&ioc->sas_node_lock, flags); 3511 spin_lock_irqsave(&ioc->sas_node_lock, flags);
3576 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle); 3512 sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle);
3577 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 3513 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
@@ -3743,6 +3679,8 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3743 mutex_unlock(&ioc->tm_cmds.mutex); 3679 mutex_unlock(&ioc->tm_cmds.mutex);
3744 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset " 3680 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
3745 "done: handle(0x%04x)\n", ioc->name, device_handle)); 3681 "done: handle(0x%04x)\n", ioc->name, device_handle));
3682 if (ioc->shost_recovery)
3683 goto out;
3746 } 3684 }
3747 3685
3748 /* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */ 3686 /* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
@@ -3765,6 +3703,9 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3765 le32_to_cpu(mpi_reply.IOCLogInfo))); 3703 le32_to_cpu(mpi_reply.IOCLogInfo)));
3766 3704
3767 out: 3705 out:
3706
3707 _scsih_ublock_io_device(ioc, handle);
3708
3768 mpt2sas_transport_port_remove(ioc, sas_device->sas_address, 3709 mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
3769 sas_device->parent_handle); 3710 sas_device->parent_handle);
3770 3711
@@ -3908,6 +3849,8 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
3908 "expander event\n", ioc->name)); 3849 "expander event\n", ioc->name));
3909 return; 3850 return;
3910 } 3851 }
3852 if (ioc->shost_recovery)
3853 return;
3911 if (event_data->PHY[i].PhyStatus & 3854 if (event_data->PHY[i].PhyStatus &
3912 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) 3855 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
3913 continue; 3856 continue;
@@ -3923,9 +3866,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
3923 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: 3866 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
3924 if (!parent_handle) { 3867 if (!parent_handle) {
3925 if (phy_number < ioc->sas_hba.num_phys) 3868 if (phy_number < ioc->sas_hba.num_phys)
3926 _scsih_link_change(ioc, 3869 mpt2sas_transport_update_links(
3927 ioc->sas_hba.phy[phy_number].handle, 3870 ioc,
3928 handle, phy_number, link_rate_); 3871 ioc->sas_hba.phy[phy_number].handle,
3872 handle, phy_number, link_rate_);
3929 } else { 3873 } else {
3930 spin_lock_irqsave(&ioc->sas_node_lock, flags); 3874 spin_lock_irqsave(&ioc->sas_node_lock, flags);
3931 sas_expander = 3875 sas_expander =
@@ -3935,17 +3879,14 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
3935 flags); 3879 flags);
3936 if (sas_expander) { 3880 if (sas_expander) {
3937 if (phy_number < sas_expander->num_phys) 3881 if (phy_number < sas_expander->num_phys)
3938 _scsih_link_change(ioc, 3882 mpt2sas_transport_update_links(
3939 sas_expander-> 3883 ioc,
3940 phy[phy_number].handle, 3884 sas_expander->
3941 handle, phy_number, 3885 phy[phy_number].handle,
3942 link_rate_); 3886 handle, phy_number,
3887 link_rate_);
3943 } 3888 }
3944 } 3889 }
3945 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
3946 if (link_rate_ >= MPI2_SAS_NEG_LINK_RATE_1_5)
3947 _scsih_ublock_io_device(ioc, handle);
3948 }
3949 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) { 3890 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) {
3950 if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5) 3891 if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5)
3951 break; 3892 break;
@@ -4455,7 +4396,7 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
4455 return; 4396 return;
4456 } 4397 }
4457 4398
4458 _scsih_link_change(ioc, 4399 mpt2sas_transport_update_links(ioc,
4459 le16_to_cpu(sas_device_pg0.ParentDevHandle), 4400 le16_to_cpu(sas_device_pg0.ParentDevHandle),
4460 handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); 4401 handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
4461 4402
@@ -4744,7 +4685,7 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4744 return; 4685 return;
4745 } 4686 }
4746 4687
4747 _scsih_link_change(ioc, 4688 mpt2sas_transport_update_links(ioc,
4748 le16_to_cpu(sas_device_pg0.ParentDevHandle), 4689 le16_to_cpu(sas_device_pg0.ParentDevHandle),
4749 handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); 4690 handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
4750 4691
@@ -5156,22 +5097,9 @@ static void
5156_scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc) 5097_scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
5157{ 5098{
5158 struct _sas_device *sas_device, *sas_device_next; 5099 struct _sas_device *sas_device, *sas_device_next;
5159 struct _sas_node *sas_expander, *sas_expander_next; 5100 struct _sas_node *sas_expander;
5160 struct _raid_device *raid_device, *raid_device_next; 5101 struct _raid_device *raid_device, *raid_device_next;
5161 unsigned long flags;
5162 5102
5163 _scsih_search_responding_sas_devices(ioc);
5164 _scsih_search_responding_raid_devices(ioc);
5165 _scsih_search_responding_expanders(ioc);
5166
5167 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5168 ioc->shost_recovery = 0;
5169 if (ioc->shost->shost_state == SHOST_RECOVERY) {
5170 printk(MPT2SAS_INFO_FMT "putting controller into "
5171 "SHOST_RUNNING\n", ioc->name);
5172 scsi_host_set_state(ioc->shost, SHOST_RUNNING);
5173 }
5174 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5175 5103
5176 list_for_each_entry_safe(sas_device, sas_device_next, 5104 list_for_each_entry_safe(sas_device, sas_device_next,
5177 &ioc->sas_device_list, list) { 5105 &ioc->sas_device_list, list) {
@@ -5207,16 +5135,63 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
5207 _scsih_raid_device_remove(ioc, raid_device); 5135 _scsih_raid_device_remove(ioc, raid_device);
5208 } 5136 }
5209 5137
5210 list_for_each_entry_safe(sas_expander, sas_expander_next, 5138 retry_expander_search:
5211 &ioc->sas_expander_list, list) { 5139 sas_expander = NULL;
5140 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
5212 if (sas_expander->responding) { 5141 if (sas_expander->responding) {
5213 sas_expander->responding = 0; 5142 sas_expander->responding = 0;
5214 continue; 5143 continue;
5215 } 5144 }
5216 printk("\tremoving expander: handle(0x%04x), "
5217 " sas_addr(0x%016llx)\n", sas_expander->handle,
5218 (unsigned long long)sas_expander->sas_address);
5219 _scsih_expander_remove(ioc, sas_expander->handle); 5145 _scsih_expander_remove(ioc, sas_expander->handle);
5146 goto retry_expander_search;
5147 }
5148}
5149
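The retry_expander_search loop above deliberately restarts the walk from the list head after every removal: _scsih_expander_remove can tear down child expanders as well, so even a list_for_each_entry_safe() cursor could be left dangling. A minimal sketch of the same restart-on-removal pattern, with a hypothetical item type and remove_item() helper:

	#include <linux/types.h>
	#include <linux/list.h>

	struct item {
		struct list_head list;
		bool responding;
	};

	/* remove_item() (hypothetical) may free more nodes than the one
	 * passed in, so restart the scan from the head after each call
	 * instead of trusting a saved next pointer.
	 */
	static void prune_list(struct list_head *head)
	{
		struct item *it;

	retry:
		list_for_each_entry(it, head, list) {
			if (it->responding) {
				it->responding = false;
				continue;
			}
			remove_item(it);
			goto retry;
		}
	}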
5150/**
5151 * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
5152 * @ioc: per adapter object
5153 * @reset_phase: phase
5154 *
5155 * The handler for doing any required cleanup or initialization.
5156 *
5157 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
 5158 * MPT2_IOC_DONE_RESET or MPT2_IOC_RUNNING
5159 *
5160 * Return nothing.
5161 */
5162void
5163mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
5164{
5165 switch (reset_phase) {
5166 case MPT2_IOC_PRE_RESET:
5167 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
5168 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
5169 _scsih_fw_event_off(ioc);
5170 break;
5171 case MPT2_IOC_AFTER_RESET:
5172 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
5173 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
5174 if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
5175 ioc->tm_cmds.status |= MPT2_CMD_RESET;
5176 mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
5177 complete(&ioc->tm_cmds.done);
5178 }
5179 _scsih_fw_event_on(ioc);
5180 _scsih_flush_running_cmds(ioc);
5181 break;
5182 case MPT2_IOC_DONE_RESET:
5183 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
5184 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
5185 _scsih_sas_host_refresh(ioc, 0);
5186 _scsih_search_responding_sas_devices(ioc);
5187 _scsih_search_responding_raid_devices(ioc);
5188 _scsih_search_responding_expanders(ioc);
5189 break;
5190 case MPT2_IOC_RUNNING:
5191 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
5192 "MPT2_IOC_RUNNING\n", ioc->name, __func__));
5193 _scsih_remove_unresponding_devices(ioc);
5194 break;
5220 } 5195 }
5221} 5196}
5222 5197
@@ -5236,14 +5211,6 @@ _firmware_event_work(struct work_struct *work)
5236 unsigned long flags; 5211 unsigned long flags;
5237 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; 5212 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
5238 5213
5239 /* This is invoked by calling _scsih_queue_rescan(). */
5240 if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
5241 _scsih_fw_event_free(ioc, fw_event);
5242 _scsih_sas_host_refresh(ioc, 1);
5243 _scsih_remove_unresponding_devices(ioc);
5244 return;
5245 }
5246
5247 /* the queue is being flushed so ignore this event */ 5214 /* the queue is being flushed so ignore this event */
5248 spin_lock_irqsave(&ioc->fw_event_lock, flags); 5215 spin_lock_irqsave(&ioc->fw_event_lock, flags);
5249 if (ioc->fw_events_off || ioc->remove_host) { 5216 if (ioc->fw_events_off || ioc->remove_host) {
@@ -5253,13 +5220,10 @@ _firmware_event_work(struct work_struct *work)
5253 } 5220 }
5254 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 5221 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
5255 5222
5256 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5257 if (ioc->shost_recovery) { 5223 if (ioc->shost_recovery) {
5258 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5259 _scsih_fw_event_requeue(ioc, fw_event, 1000); 5224 _scsih_fw_event_requeue(ioc, fw_event, 1000);
5260 return; 5225 return;
5261 } 5226 }
5262 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
5263 5227
5264 switch (fw_event->event) { 5228 switch (fw_event->event) {
5265 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 5229 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
@@ -5461,6 +5425,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5461 if (!sas_device) 5425 if (!sas_device)
5462 continue; 5426 continue;
5463 _scsih_remove_device(ioc, sas_device->handle); 5427 _scsih_remove_device(ioc, sas_device->handle);
5428 if (ioc->shost_recovery)
5429 return;
5464 goto retry_device_search; 5430 goto retry_device_search;
5465 } 5431 }
5466 } 5432 }
@@ -5482,6 +5448,8 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5482 if (!expander_sibling) 5448 if (!expander_sibling)
5483 continue; 5449 continue;
5484 _scsih_expander_remove(ioc, expander_sibling->handle); 5450 _scsih_expander_remove(ioc, expander_sibling->handle);
5451 if (ioc->shost_recovery)
5452 return;
5485 goto retry_expander_search; 5453 goto retry_expander_search;
5486 } 5454 }
5487 } 5455 }
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 686695b155c7..742324a0a11e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -140,11 +140,18 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
140 u32 device_info; 140 u32 device_info;
141 u32 ioc_status; 141 u32 ioc_status;
142 142
143 if (ioc->shost_recovery) {
144 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
 145 ioc->name, __func__);
146 return -EFAULT;
147 }
148
143 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 149 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
144 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 150 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
145 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 151 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
152
146 ioc->name, __FILE__, __LINE__, __func__); 153 ioc->name, __FILE__, __LINE__, __func__);
147 return -1; 154 return -ENXIO;
148 } 155 }
149 156
150 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 157 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
@@ -153,7 +160,7 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
153 printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)" 160 printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
154 "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status, 161 "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
155 __FILE__, __LINE__, __func__); 162 __FILE__, __LINE__, __func__);
156 return -1; 163 return -EIO;
157 } 164 }
158 165
159 memset(identify, 0, sizeof(identify)); 166 memset(identify, 0, sizeof(identify));
@@ -288,21 +295,17 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
288 void *psge; 295 void *psge;
289 u32 sgl_flags; 296 u32 sgl_flags;
290 u8 issue_reset = 0; 297 u8 issue_reset = 0;
291 unsigned long flags;
292 void *data_out = NULL; 298 void *data_out = NULL;
293 dma_addr_t data_out_dma; 299 dma_addr_t data_out_dma;
294 u32 sz; 300 u32 sz;
295 u64 *sas_address_le; 301 u64 *sas_address_le;
296 u16 wait_state_count; 302 u16 wait_state_count;
297 303
298 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 304 if (ioc->shost_recovery) {
299 if (ioc->ioc_reset_in_progress) {
300 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
301 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", 305 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
 302 ioc->name, __func__); 306 ioc->name, __func__);
303 return -EFAULT; 307 return -EFAULT;
304 } 308 }
305 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
306 309
307 mutex_lock(&ioc->transport_cmds.mutex); 310 mutex_lock(&ioc->transport_cmds.mutex);
308 311
@@ -789,7 +792,7 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
789} 792}
790 793
791/** 794/**
792 * mpt2sas_transport_update_phy_link_change - refreshing phy link changes and attached devices 795 * mpt2sas_transport_update_links - refreshing phy link changes
793 * @ioc: per adapter object 796 * @ioc: per adapter object
794 * @handle: handle to sas_host or expander 797 * @handle: handle to sas_host or expander
795 * @attached_handle: attached device handle 798 * @attached_handle: attached device handle
@@ -799,13 +802,19 @@ mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
799 * Returns nothing. 802 * Returns nothing.
800 */ 803 */
801void 804void
802mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, 805mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
803 u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate) 806 u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate)
804{ 807{
805 unsigned long flags; 808 unsigned long flags;
806 struct _sas_node *sas_node; 809 struct _sas_node *sas_node;
807 struct _sas_phy *mpt2sas_phy; 810 struct _sas_phy *mpt2sas_phy;
808 811
812 if (ioc->shost_recovery) {
813 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
 814 ioc->name, __func__);
815 return;
816 }
817
809 spin_lock_irqsave(&ioc->sas_node_lock, flags); 818 spin_lock_irqsave(&ioc->sas_node_lock, flags);
810 sas_node = _transport_sas_node_find_by_handle(ioc, handle); 819 sas_node = _transport_sas_node_find_by_handle(ioc, handle);
811 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 820 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
@@ -1025,7 +1034,6 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1025 void *psge; 1034 void *psge;
1026 u32 sgl_flags; 1035 u32 sgl_flags;
1027 u8 issue_reset = 0; 1036 u8 issue_reset = 0;
1028 unsigned long flags;
1029 dma_addr_t dma_addr_in = 0; 1037 dma_addr_t dma_addr_in = 0;
1030 dma_addr_t dma_addr_out = 0; 1038 dma_addr_t dma_addr_out = 0;
1031 u16 wait_state_count; 1039 u16 wait_state_count;
@@ -1045,14 +1053,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1045 return -EINVAL; 1053 return -EINVAL;
1046 } 1054 }
1047 1055
1048 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 1056 if (ioc->shost_recovery) {
1049 if (ioc->ioc_reset_in_progress) {
1050 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1051 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n", 1057 printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
 1052 ioc->name, __func__); 1058 ioc->name, __func__);
1053 return -EFAULT; 1059 return -EFAULT;
1054 } 1060 }
1055 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1056 1061
1057 rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); 1062 rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
1058 if (rc) 1063 if (rc)
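The pattern in the hunks above is the same each time: the spinlock-protected ioc_reset_in_progress test becomes a plain read of the shost_recovery flag, turning every entry point into a lock-free guard clause. A sketch of the resulting shape, using a hypothetical entry point and the same MPT2SAS_ADAPTER field:

	/* hypothetical entry point; shost_recovery stays set for the whole
	 * recovery window, so a plain read is enough to bail out early
	 */
	static int example_transport_op(struct MPT2SAS_ADAPTER *ioc)
	{
		if (ioc->shost_recovery) {
			printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
			    ioc->name, __func__);
			return -EFAULT;
		}
		/* ... normal processing ... */
		return 0;
	}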
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 70b60ade049e..e32c344d7ad8 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1713,7 +1713,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
1713 1713
1714 nsp_dbg(NSP_DEBUG_INIT, "in"); 1714 nsp_dbg(NSP_DEBUG_INIT, "in");
1715 1715
1716 cfg_mem = kzalloc(sizeof(cfg_mem), GFP_KERNEL); 1716 cfg_mem = kzalloc(sizeof(*cfg_mem), GFP_KERNEL);
1717 if (!cfg_mem) 1717 if (!cfg_mem)
1718 return -ENOMEM; 1718 return -ENOMEM;
1719 cfg_mem->data = data; 1719 cfg_mem->data = data;
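The one-character fix above is the classic kzalloc sizing bug: sizeof(cfg_mem) measures the pointer itself (4 or 8 bytes), not the object it points to, so the structure was being allocated short. A minimal illustration with a hypothetical structure:

	#include <linux/slab.h>

	struct cfg_mem_t {		/* hypothetical stand-in */
		void *data;
		char buf[64];
	};

	static struct cfg_mem_t *alloc_cfg(void)
	{
		struct cfg_mem_t *cfg_mem;

		/* BUG: sizeof(cfg_mem) == sizeof(void *), far too small:
		 *	cfg_mem = kzalloc(sizeof(cfg_mem), GFP_KERNEL);
		 * correct: sizeof(*cfg_mem) is the full object size
		 */
		cfg_mem = kzalloc(sizeof(*cfg_mem), GFP_KERNEL);
		return cfg_mem;
	}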
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
new file mode 100644
index 000000000000..4302f06e4ec9
--- /dev/null
+++ b/drivers/scsi/pmcraid.c
@@ -0,0 +1,5604 @@
1/*
2 * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
3 *
4 * Written By: PMC Sierra Corporation
5 *
6 * Copyright (C) 2008, 2009 PMC Sierra Inc
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
21 * USA
22 *
23 */
24#include <linux/fs.h>
25#include <linux/init.h>
26#include <linux/types.h>
27#include <linux/errno.h>
28#include <linux/kernel.h>
29#include <linux/ioport.h>
30#include <linux/delay.h>
31#include <linux/pci.h>
32#include <linux/wait.h>
33#include <linux/spinlock.h>
34#include <linux/sched.h>
35#include <linux/interrupt.h>
36#include <linux/blkdev.h>
37#include <linux/firmware.h>
38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/hdreg.h>
41#include <linux/version.h>
42#include <linux/io.h>
43#include <asm/irq.h>
44#include <asm/processor.h>
45#include <linux/libata.h>
46#include <linux/mutex.h>
47#include <scsi/scsi.h>
48#include <scsi/scsi_host.h>
49#include <scsi/scsi_tcq.h>
50#include <scsi/scsi_eh.h>
51#include <scsi/scsi_cmnd.h>
52#include <scsi/scsicam.h>
53
54#include "pmcraid.h"
55
56/*
57 * Module configuration parameters
58 */
59static unsigned int pmcraid_debug_log;
60static unsigned int pmcraid_disable_aen;
61static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
62
63/*
64 * Data structures to support multiple adapters by the LLD.
65 * pmcraid_adapter_count - count of configured adapters
66 */
67static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
68
69/*
70 * Supporting user-level control interface through IOCTL commands.
71 * pmcraid_major - major number to use
72 * pmcraid_minor - minor number(s) to use
73 */
74static unsigned int pmcraid_major;
75static struct class *pmcraid_class;
76DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
77
78/*
79 * Module parameters
80 */
81MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com");
82MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
83MODULE_LICENSE("GPL");
84MODULE_VERSION(PMCRAID_DRIVER_VERSION);
85
86module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
87MODULE_PARM_DESC(log_level,
88 "Enables firmware error code logging, default :1 high-severity"
89 " errors, 2: all errors including high-severity errors,"
90 " 0: disables logging");
91
92module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
93MODULE_PARM_DESC(debug,
94 "Enable driver verbose message logging. Set 1 to enable."
95 "(default: 0)");
96
97module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
98MODULE_PARM_DESC(disable_aen,
99 "Disable driver aen notifications to apps. Set 1 to disable."
100 "(default: 0)");
101
102/* chip specific constants for PMC MaxRAID controllers (same for
 103 * 0x5220 and 0x8010)
104 */
105static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
106 {
107 .ioastatus = 0x0,
108 .ioarrin = 0x00040,
109 .mailbox = 0x7FC30,
110 .global_intr_mask = 0x00034,
111 .ioa_host_intr = 0x0009C,
112 .ioa_host_intr_clr = 0x000A0,
113 .ioa_host_mask = 0x7FC28,
114 .ioa_host_mask_clr = 0x7FC28,
115 .host_ioa_intr = 0x00020,
116 .host_ioa_intr_clr = 0x00020,
117 .transop_timeout = 300
118 }
119};
120
121/*
122 * PCI device ids supported by pmcraid driver
123 */
124static struct pci_device_id pmcraid_pci_table[] __devinitdata = {
125 { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
126 0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
127 },
128 {}
129};
130
131MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
132
133
134
135/**
136 * pmcraid_slave_alloc - Prepare for commands to a device
137 * @scsi_dev: scsi device struct
138 *
139 * This function is called by mid-layer prior to sending any command to the new
140 * device. Stores resource entry details of the device in scsi_device struct.
141 * Queuecommand uses the resource handle and other details to fill up IOARCB
142 * while sending commands to the device.
143 *
144 * Return value:
145 * 0 on success / -ENXIO if device does not exist
146 */
147static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
148{
149 struct pmcraid_resource_entry *temp, *res = NULL;
150 struct pmcraid_instance *pinstance;
151 u8 target, bus, lun;
152 unsigned long lock_flags;
153 int rc = -ENXIO;
154 pinstance = shost_priv(scsi_dev->host);
155
156 /* Driver exposes VSET and GSCSI resources only; all other device types
157 * are not exposed. Resource list is synchronized using resource lock
 158 * so any traversal or modification of the list should be done while
 159 * holding this lock
160 */
161 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
162 list_for_each_entry(temp, &pinstance->used_res_q, queue) {
163
164 /* do not expose VSETs with order-ids >= 240 */
165 if (RES_IS_VSET(temp->cfg_entry)) {
166 target = temp->cfg_entry.unique_flags1;
167 if (target >= PMCRAID_MAX_VSET_TARGETS)
168 continue;
169 bus = PMCRAID_VSET_BUS_ID;
170 lun = 0;
171 } else if (RES_IS_GSCSI(temp->cfg_entry)) {
172 target = RES_TARGET(temp->cfg_entry.resource_address);
173 bus = PMCRAID_PHYS_BUS_ID;
174 lun = RES_LUN(temp->cfg_entry.resource_address);
175 } else {
176 continue;
177 }
178
179 if (bus == scsi_dev->channel &&
180 target == scsi_dev->id &&
181 lun == scsi_dev->lun) {
182 res = temp;
183 break;
184 }
185 }
186
187 if (res) {
188 res->scsi_dev = scsi_dev;
189 scsi_dev->hostdata = res;
190 res->change_detected = 0;
191 atomic_set(&res->read_failures, 0);
192 atomic_set(&res->write_failures, 0);
193 rc = 0;
194 }
195 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
196 return rc;
197}
198
199/**
200 * pmcraid_slave_configure - Configures a SCSI device
201 * @scsi_dev: scsi device struct
202 *
 203 * This function is executed by SCSI mid layer just after a device is first
204 * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
 205 * timeout value (default 30s) will be raised to a higher value (60s)
 206 * and the max_sectors value will be set to 512. It also sets queue depth
207 * to host->cmd_per_lun value
208 *
209 * Return value:
210 * 0 on success
211 */
212static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
213{
214 struct pmcraid_resource_entry *res = scsi_dev->hostdata;
215
216 if (!res)
217 return 0;
218
219 /* LLD exposes VSETs and Enclosure devices only */
220 if (RES_IS_GSCSI(res->cfg_entry) &&
221 scsi_dev->type != TYPE_ENCLOSURE)
222 return -ENXIO;
223
224 pmcraid_info("configuring %x:%x:%x:%x\n",
225 scsi_dev->host->unique_id,
226 scsi_dev->channel,
227 scsi_dev->id,
228 scsi_dev->lun);
229
230 if (RES_IS_GSCSI(res->cfg_entry)) {
231 scsi_dev->allow_restart = 1;
232 } else if (RES_IS_VSET(res->cfg_entry)) {
233 scsi_dev->allow_restart = 1;
234 blk_queue_rq_timeout(scsi_dev->request_queue,
235 PMCRAID_VSET_IO_TIMEOUT);
236 blk_queue_max_sectors(scsi_dev->request_queue,
237 PMCRAID_VSET_MAX_SECTORS);
238 }
239
240 if (scsi_dev->tagged_supported &&
241 (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
242 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
243 scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG,
244 scsi_dev->host->cmd_per_lun);
245 } else {
246 scsi_adjust_queue_depth(scsi_dev, 0,
247 scsi_dev->host->cmd_per_lun);
248 }
249
250 return 0;
251}
252
253/**
254 * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
255 *
256 * @scsi_dev: scsi device struct
257 *
258 * This is called by mid-layer before removing a device. Pointer assignments
259 * done in pmcraid_slave_alloc will be reset to NULL here.
260 *
261 * Return value
262 * none
263 */
264static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
265{
266 struct pmcraid_resource_entry *res;
267
268 res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
269
270 if (res)
271 res->scsi_dev = NULL;
272
273 scsi_dev->hostdata = NULL;
274}
275
276/**
277 * pmcraid_change_queue_depth - Change the device's queue depth
278 * @scsi_dev: scsi device struct
279 * @depth: depth to set
280 *
281 * Return value
282 * actual depth set
283 */
284static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
285{
286 if (depth > PMCRAID_MAX_CMD_PER_LUN)
287 depth = PMCRAID_MAX_CMD_PER_LUN;
288
289 scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), depth);
290
291 return scsi_dev->queue_depth;
292}
293
294/**
295 * pmcraid_change_queue_type - Change the device's queue type
296 * @scsi_dev: scsi device struct
297 * @tag: type of tags to use
298 *
299 * Return value:
300 * actual queue type set
301 */
302static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag)
303{
304 struct pmcraid_resource_entry *res;
305
306 res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
307
308 if ((res) && scsi_dev->tagged_supported &&
309 (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
310 scsi_set_tag_type(scsi_dev, tag);
311
312 if (tag)
313 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
314 else
315 scsi_deactivate_tcq(scsi_dev, scsi_dev->queue_depth);
316 } else
317 tag = 0;
318
319 return tag;
320}
321
322
323/**
324 * pmcraid_init_cmdblk - initializes a command block
325 *
326 * @cmd: pointer to struct pmcraid_cmd to be initialized
327 * @index: if >=0 first time initialization; otherwise reinitialization
328 *
329 * Return Value
330 * None
331 */
332void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
333{
334 struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
335 dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
336
337 if (index >= 0) {
338 /* first time initialization (called from probe) */
339 u32 ioasa_offset =
340 offsetof(struct pmcraid_control_block, ioasa);
341
342 cmd->index = index;
343 ioarcb->response_handle = cpu_to_le32(index << 2);
344 ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
345 ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
346 ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
347 } else {
348 /* re-initialization of various lengths, called once command is
349 * processed by IOA
350 */
351 memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
352 ioarcb->request_flags0 = 0;
353 ioarcb->request_flags1 = 0;
354 ioarcb->cmd_timeout = 0;
355 ioarcb->ioarcb_bus_addr &= (~0x1FULL);
356 ioarcb->ioadl_bus_addr = 0;
357 ioarcb->ioadl_length = 0;
358 ioarcb->data_transfer_length = 0;
359 ioarcb->add_cmd_param_length = 0;
360 ioarcb->add_cmd_param_offset = 0;
361 cmd->ioa_cb->ioasa.ioasc = 0;
362 cmd->ioa_cb->ioasa.residual_data_length = 0;
363 cmd->u.time_left = 0;
364 }
365
366 cmd->cmd_done = NULL;
367 cmd->scsi_cmd = NULL;
368 cmd->release = 0;
369 cmd->completion_req = 0;
370 cmd->dma_handle = 0;
371 init_timer(&cmd->timer);
372}
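Per the kernel-doc above, a non-negative index performs the one-time setup (the response handle and the IOARCB/IOASA bus addresses are derived from the block's index and DMA address), while -1 merely re-zeroes the per-request fields. A sketch of the probe-time loop this implies; that the instance keeps its blocks in a cmd_list array of PMCRAID_MAX_CMD pointers is an assumption:

	/* first-time initialization of the whole command pool */
	static void init_cmd_pool(struct pmcraid_instance *pinstance)
	{
		int i;

		for (i = 0; i < PMCRAID_MAX_CMD; i++)
			pmcraid_init_cmdblk(pinstance->cmd_list[i], i);
	}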
373
374/**
375 * pmcraid_reinit_cmdblk - reinitialize a command block
376 *
377 * @cmd: pointer to struct pmcraid_cmd to be reinitialized
378 *
379 * Return Value
380 * None
381 */
382static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
383{
384 pmcraid_init_cmdblk(cmd, -1);
385}
386
387/**
388 * pmcraid_get_free_cmd - get a free cmd block from command block pool
389 * @pinstance: adapter instance structure
390 *
391 * Return Value:
392 * returns pointer to cmd block or NULL if no blocks are available
393 */
394static struct pmcraid_cmd *pmcraid_get_free_cmd(
395 struct pmcraid_instance *pinstance
396)
397{
398 struct pmcraid_cmd *cmd = NULL;
399 unsigned long lock_flags;
400
401 /* free cmd block list is protected by free_pool_lock */
402 spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
403
404 if (!list_empty(&pinstance->free_cmd_pool)) {
405 cmd = list_entry(pinstance->free_cmd_pool.next,
406 struct pmcraid_cmd, free_list);
407 list_del(&cmd->free_list);
408 }
409 spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
410
411 /* Initialize the command block before giving it the caller */
412 if (cmd != NULL)
413 pmcraid_reinit_cmdblk(cmd);
414 return cmd;
415}
416
417/**
418 * pmcraid_return_cmd - return a completed command block back into free pool
419 * @cmd: pointer to the command block
420 *
421 * Return Value:
422 * nothing
423 */
424void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
425{
426 struct pmcraid_instance *pinstance = cmd->drv_inst;
427 unsigned long lock_flags;
428
429 spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
430 list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
431 spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
432}
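Taken together, the two helpers above define the pool discipline every sender follows: take a block (it comes back already re-initialized), fill in the IOARCB, and hand the block back once the response has been consumed. A minimal usage sketch with a hypothetical caller:

	static int use_cmd_pool(struct pmcraid_instance *pinstance)
	{
		struct pmcraid_cmd *cmd = pmcraid_get_free_cmd(pinstance);

		if (!cmd)
			return -ENOMEM;	/* pool exhausted, retry later */

		/* ... fill in cmd->ioa_cb->ioarcb and fire the command ... */

		/* after the response has been processed: */
		pmcraid_return_cmd(cmd);
		return 0;
	}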
433
434/**
435 * pmcraid_read_interrupts - reads IOA interrupts
436 *
437 * @pinstance: pointer to adapter instance structure
438 *
439 * Return value
440 * interrupts read from IOA
441 */
442static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
443{
444 return ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
445}
446
447/**
448 * pmcraid_disable_interrupts - Masks and clears all specified interrupts
449 *
450 * @pinstance: pointer to per adapter instance structure
451 * @intrs: interrupts to disable
452 *
453 * Return Value
454 * None
455 */
456static void pmcraid_disable_interrupts(
457 struct pmcraid_instance *pinstance,
458 u32 intrs
459)
460{
461 u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
462 u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
463
464 iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
465 iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
466 iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg);
467 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
468}
469
470/**
471 * pmcraid_enable_interrupts - Enables specified interrupts
472 *
473 * @pinstance: pointer to per adapter instance structure
474 * @intr: interrupts to enable
475 *
476 * Return Value
477 * None
478 */
479static void pmcraid_enable_interrupts(
480 struct pmcraid_instance *pinstance,
481 u32 intrs
482)
483{
484 u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
485 u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
486
487 iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
488 iowrite32(~intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg);
489 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
490
491 pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
492 ioread32(pinstance->int_regs.global_interrupt_mask_reg),
493 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
494}
495
496/**
497 * pmcraid_reset_type - Determine the required reset type
498 * @pinstance: pointer to adapter instance structure
499 *
500 * IOA requires hard reset if any of the following conditions is true.
501 * 1. If HRRQ valid interrupt is not masked
502 * 2. IOA reset alert doorbell is set
503 * 3. If there are any error interrupts
504 */
505static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
506{
507 u32 mask;
508 u32 intrs;
509 u32 alerts;
510
511 mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
512 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
513 alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
514
515 if ((mask & INTRS_HRRQ_VALID) == 0 ||
516 (alerts & DOORBELL_IOA_RESET_ALERT) ||
517 (intrs & PMCRAID_ERROR_INTERRUPTS)) {
518 pmcraid_info("IOA requires hard reset\n");
519 pinstance->ioa_hard_reset = 1;
520 }
521
522 /* If unit check is active, trigger the dump */
523 if (intrs & INTRS_IOA_UNIT_CHECK)
524 pinstance->ioa_unit_check = 1;
525}
526
527/**
528 * pmcraid_bist_done - completion function for PCI BIST
529 * @cmd: pointer to reset command
530 * Return Value
531 * none
532 */
533
534static void pmcraid_ioa_reset(struct pmcraid_cmd *);
535
536static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
537{
538 struct pmcraid_instance *pinstance = cmd->drv_inst;
539 unsigned long lock_flags;
540 int rc;
541 u16 pci_reg;
542
543 rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
544
 545 /* If PCI config space can't be accessed, wait for another two secs */
546 if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
547 cmd->u.time_left > 0) {
548 pmcraid_info("BIST not complete, waiting another 2 secs\n");
549 cmd->timer.expires = jiffies + cmd->u.time_left;
550 cmd->u.time_left = 0;
551 cmd->timer.data = (unsigned long)cmd;
552 cmd->timer.function =
553 (void (*)(unsigned long))pmcraid_bist_done;
554 add_timer(&cmd->timer);
555 } else {
556 cmd->u.time_left = 0;
557 pmcraid_info("BIST is complete, proceeding with reset\n");
558 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
559 pmcraid_ioa_reset(cmd);
560 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
561 }
562}
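pmcraid_bist_done polls by re-arming the very same struct timer_list until the PCI command register becomes readable or time_left runs out. A compact sketch of that 2.6-era timer idiom (data/function/expires filled in by hand before add_timer), where condition_met() is a hypothetical check standing in for the PCI config-space read:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void poll_step(unsigned long data)
	{
		struct pmcraid_cmd *cmd = (struct pmcraid_cmd *)data;

		if (!condition_met(cmd) && cmd->u.time_left > 0) {
			cmd->u.time_left -= HZ;		/* one second used up */
			cmd->timer.data = (unsigned long)cmd;
			cmd->timer.expires = jiffies + HZ;
			cmd->timer.function = poll_step;
			add_timer(&cmd->timer);		/* check again later */
			return;
		}
		/* done or timed out: resume the reset state machine */
	}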
563
564/**
565 * pmcraid_start_bist - starts BIST
566 * @cmd: pointer to reset cmd
567 * Return Value
568 * none
569 */
570static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
571{
572 struct pmcraid_instance *pinstance = cmd->drv_inst;
573 u32 doorbells, intrs;
574
575 /* proceed with bist and wait for 2 seconds */
576 iowrite32(DOORBELL_IOA_START_BIST,
577 pinstance->int_regs.host_ioa_interrupt_reg);
578 doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
579 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
580 pmcraid_info("doorbells after start bist: %x intrs: %x \n",
581 doorbells, intrs);
582
583 cmd->u.time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
584 cmd->timer.data = (unsigned long)cmd;
585 cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
586 cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
587 add_timer(&cmd->timer);
588}
589
590/**
591 * pmcraid_reset_alert_done - completion routine for reset_alert
592 * @cmd: pointer to command block used in reset sequence
593 * Return value
594 * None
595 */
596static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
597{
598 struct pmcraid_instance *pinstance = cmd->drv_inst;
599 u32 status = ioread32(pinstance->ioa_status);
600 unsigned long lock_flags;
601
602 /* if the critical operation in progress bit is set or the wait times
603 * out, invoke reset engine to proceed with hard reset. If there is
604 * some more time to wait, restart the timer
605 */
606 if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
607 cmd->u.time_left <= 0) {
608 pmcraid_info("critical op is reset proceeding with reset\n");
609 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
610 pmcraid_ioa_reset(cmd);
611 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
612 } else {
613 pmcraid_info("critical op is not yet reset waiting again\n");
614 /* restart timer if some more time is available to wait */
615 cmd->u.time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
616 cmd->timer.data = (unsigned long)cmd;
617 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
618 cmd->timer.function =
619 (void (*)(unsigned long))pmcraid_reset_alert_done;
620 add_timer(&cmd->timer);
621 }
622}
623
624/**
625 * pmcraid_reset_alert - alerts IOA for a possible reset
626 * @cmd : command block to be used for reset sequence.
627 *
628 * Return Value
 629 * none. If PCI config space is accessible, the reset-alert doorbell
 630 * is written to the IOA; otherwise the reset sequence falls back to
 631 * starting BIST
632 */
633static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
634{
635 struct pmcraid_instance *pinstance = cmd->drv_inst;
636 u32 doorbells;
637 int rc;
638 u16 pci_reg;
639
640 /* If we are able to access IOA PCI config space, alert IOA that we are
 641 * going to reset it soon. This enables IOA to preserve persistent error
642 * data if any. In case memory space is not accessible, proceed with
643 * BIST or slot_reset
644 */
645 rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
646 if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {
647
 648 /* wait for IOA permission, i.e. until the CRITICAL_OPERATION bit is
 649 * reset. IOA doesn't generate any interrupts when the CRITICAL
 650 * OPERATION bit is reset, so a timer is started to wait for this
 651 * bit to be reset.
652 */
653 cmd->u.time_left = PMCRAID_RESET_TIMEOUT;
654 cmd->timer.data = (unsigned long)cmd;
655 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
656 cmd->timer.function =
657 (void (*)(unsigned long))pmcraid_reset_alert_done;
658 add_timer(&cmd->timer);
659
660 iowrite32(DOORBELL_IOA_RESET_ALERT,
661 pinstance->int_regs.host_ioa_interrupt_reg);
662 doorbells =
663 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
664 pmcraid_info("doorbells after reset alert: %x\n", doorbells);
665 } else {
666 pmcraid_info("PCI config is not accessible starting BIST\n");
667 pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
668 pmcraid_start_bist(cmd);
669 }
670}
671
672/**
673 * pmcraid_timeout_handler - Timeout handler for internally generated ops
674 *
675 * @cmd : pointer to command structure, that got timedout
676 *
677 * This function blocks host requests and initiates an adapter reset.
678 *
679 * Return value:
680 * None
681 */
682static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
683{
684 struct pmcraid_instance *pinstance = cmd->drv_inst;
685 unsigned long lock_flags;
686
687 dev_err(&pinstance->pdev->dev,
688 "Adapter being reset due to command timeout.\n");
689
690 /* Command timeouts result in hard reset sequence. The command that got
691 * timed out may be the one used as part of reset sequence. In this
692 * case restart reset sequence using the same command block even if
693 * reset is in progress. Otherwise fail this command and get a free
694 * command block to restart the reset sequence.
695 */
696 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
697 if (!pinstance->ioa_reset_in_progress) {
698 pinstance->ioa_reset_attempts = 0;
699 cmd = pmcraid_get_free_cmd(pinstance);
700
701 /* If we are out of command blocks, just return here itself.
702 * Some other command's timeout handler can do the reset job
703 */
704 if (cmd == NULL) {
705 spin_unlock_irqrestore(pinstance->host->host_lock,
706 lock_flags);
707 pmcraid_err("no free cmnd block for timeout handler\n");
708 return;
709 }
710
711 pinstance->reset_cmd = cmd;
712 pinstance->ioa_reset_in_progress = 1;
713 } else {
714 pmcraid_info("reset is already in progress\n");
715
716 if (pinstance->reset_cmd != cmd) {
717 /* This command should have been given to IOA, this
718 * command will be completed by fail_outstanding_cmds
719 * anyway
720 */
721 pmcraid_err("cmd is pending but reset in progress\n");
722 }
723
724 /* If this command was being used as part of the reset
725 * sequence, set cmd_done pointer to pmcraid_ioa_reset. This
726 * causes fail_outstanding_commands not to return the command
727 * block back to free pool
728 */
729 if (cmd == pinstance->reset_cmd)
730 cmd->cmd_done = pmcraid_ioa_reset;
731
732 }
733
734 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
735 scsi_block_requests(pinstance->host);
736 pmcraid_reset_alert(cmd);
737 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
738}
739
740/**
741 * pmcraid_internal_done - completion routine for internally generated cmds
742 *
743 * @cmd: command that got response from IOA
744 *
745 * Return Value:
746 * none
747 */
748static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
749{
750 pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
751 cmd->ioa_cb->ioarcb.cdb[0],
752 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
753
754 /* Some of the internal commands are sent with callers blocking for the
755 * response. Same will be indicated as part of cmd->completion_req
756 * field. Response path needs to wake up any waiters waiting for cmd
757 * completion if this flag is set.
758 */
759 if (cmd->completion_req) {
760 cmd->completion_req = 0;
761 complete(&cmd->wait_for_completion);
762 }
763
764 /* most of the internal commands are completed by caller itself, so
765 * no need to return the command block back to free pool until we are
766 * required to do so (e.g once done with initialization).
767 */
768 if (cmd->release) {
769 cmd->release = 0;
770 pmcraid_return_cmd(cmd);
771 }
772}
773
774/**
775 * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
776 *
777 * @cmd: command that got response from IOA
778 *
779 * This routine is called after driver re-reads configuration table due to a
780 * lost CCN. It returns the command block back to free pool and schedules
781 * worker thread to add/delete devices into the system.
782 *
783 * Return Value:
784 * none
785 */
786static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
787{
788 pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
789 cmd->ioa_cb->ioarcb.cdb[0],
790 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
791
792 if (cmd->release) {
793 cmd->release = 0;
794 pmcraid_return_cmd(cmd);
795 }
796 pmcraid_info("scheduling worker for config table reinitialization\n");
797 schedule_work(&cmd->drv_inst->worker_q);
798}
799
800/**
801 * pmcraid_erp_done - Process completion of SCSI error response from device
802 * @cmd: pmcraid_command
803 *
804 * This function copies the sense buffer into the scsi_cmd struct and completes
805 * scsi_cmd by calling scsi_done function.
806 *
807 * Return value:
808 * none
809 */
810static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
811{
812 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
813 struct pmcraid_instance *pinstance = cmd->drv_inst;
814 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
815
816 if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
817 scsi_cmd->result |= (DID_ERROR << 16);
818 pmcraid_err("command CDB[0] = %x failed with IOASC: 0x%08X\n",
819 cmd->ioa_cb->ioarcb.cdb[0], ioasc);
820 }
821
822 /* if we had allocated sense buffers for request sense, copy the sense
 823 * into scsi_cmd and release the buffers
824 */
825 if (cmd->sense_buffer != NULL) {
826 memcpy(scsi_cmd->sense_buffer,
827 cmd->sense_buffer,
828 SCSI_SENSE_BUFFERSIZE);
829 pci_free_consistent(pinstance->pdev,
830 SCSI_SENSE_BUFFERSIZE,
831 cmd->sense_buffer, cmd->sense_buffer_dma);
832 cmd->sense_buffer = NULL;
833 cmd->sense_buffer_dma = 0;
834 }
835
836 scsi_dma_unmap(scsi_cmd);
837 pmcraid_return_cmd(cmd);
838 scsi_cmd->scsi_done(scsi_cmd);
839}
840
841/**
 842 * _pmcraid_fire_command - sends an IOA command to adapter
843 *
844 * This function adds the given block into pending command list
845 * and returns without waiting
846 *
847 * @cmd : command to be sent to the device
848 *
849 * Return Value
850 * None
851 */
852static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
853{
854 struct pmcraid_instance *pinstance = cmd->drv_inst;
855 unsigned long lock_flags;
856
857 /* Add this command block to pending cmd pool. We do this prior to
 858 * writing IOARCB to ioarrin because IOA might complete the command
 859 * by the time we are about to add it to the list. Response handler
 860 * (isr/tasklet) looks for the cmd block in the pending list.
861 */
862 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
863 list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
864 spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
865 atomic_inc(&pinstance->outstanding_cmds);
866
867 /* driver writes lower 32-bit value of IOARCB address only */
868 mb();
869 iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr),
870 pinstance->ioarrin);
871}
872
873/**
874 * pmcraid_send_cmd - fires a command to IOA
875 *
876 * This function also sets up timeout function, and command completion
877 * function
878 *
879 * @cmd: pointer to the command block to be fired to IOA
880 * @cmd_done: command completion function, called once IOA responds
881 * @timeout: timeout to wait for this command completion
882 * @timeout_func: timeout handler
883 *
884 * Return value
885 * none
886 */
887static void pmcraid_send_cmd(
888 struct pmcraid_cmd *cmd,
889 void (*cmd_done) (struct pmcraid_cmd *),
890 unsigned long timeout,
891 void (*timeout_func) (struct pmcraid_cmd *)
892)
893{
894 /* initialize done function */
895 cmd->cmd_done = cmd_done;
896
897 if (timeout_func) {
898 /* setup timeout handler */
899 cmd->timer.data = (unsigned long)cmd;
900 cmd->timer.expires = jiffies + timeout;
901 cmd->timer.function = (void (*)(unsigned long))timeout_func;
902 add_timer(&cmd->timer);
903 }
904
905 /* fire the command to IOA */
906 _pmcraid_fire_command(cmd);
907}
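pmcraid_send_cmd thus couples three things for every internal request: the completion callback, an optional timeout, and the IOARRIN write performed by _pmcraid_fire_command. A sketch of issuing a generic IOA command with it, reusing helpers and constants from this file (the hypothetical caller leaves the CDB contents unspecified):

	static void send_internal_cmd(struct pmcraid_instance *pinstance)
	{
		struct pmcraid_cmd *cmd = pmcraid_get_free_cmd(pinstance);

		if (!cmd)
			return;

		cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
		cmd->ioa_cb->ioarcb.resource_handle =
			cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
		/* cdb[] would be filled in for the specific command */

		/* pmcraid_internal_done runs on IOA response,
		 * pmcraid_timeout_handler if nothing arrives in time
		 */
		pmcraid_send_cmd(cmd, pmcraid_internal_done,
				 PMCRAID_INTERNAL_TIMEOUT,
				 pmcraid_timeout_handler);
	}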
908
909/**
910 * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
911 *
912 * @cmd: pointer to the command block used as part of reset sequence
913 *
914 * Return Value
915 * None
916 */
917static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
918{
919 pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
920 cmd->ioa_cb->ioarcb.cdb[0],
921 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
922
923 /* Note that commands sent during reset require next command to be sent
924 * to IOA. Hence reinit the done function as well as timeout function
925 */
926 pmcraid_reinit_cmdblk(cmd);
927 cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
928 cmd->ioa_cb->ioarcb.resource_handle =
929 cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
930 cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
931 cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
932
933 /* fire shutdown command to hardware. */
934 pmcraid_info("firing normal shutdown command (%d) to IOA\n",
935 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
936
937 pmcraid_send_cmd(cmd, pmcraid_ioa_reset,
938 PMCRAID_SHUTDOWN_TIMEOUT,
939 pmcraid_timeout_handler);
940}
941
942/**
943 * pmcraid_identify_hrrq - registers host rrq buffers with IOA
944 * @cmd: pointer to command block to be used for identify hrrq
945 *
946 * Return Value
 947 * none
948 */
949
950static void pmcraid_querycfg(struct pmcraid_cmd *);
951
952static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
953{
954 struct pmcraid_instance *pinstance = cmd->drv_inst;
955 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
956 int index = 0;
957 __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
 958 __be32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
959
960 pmcraid_reinit_cmdblk(cmd);
961
962 /* Initialize ioarcb */
963 ioarcb->request_type = REQ_TYPE_IOACMD;
964 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
965
966 /* initialize the hrrq number where IOA will respond to this command */
967 ioarcb->hrrq_id = index;
968 ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
969 ioarcb->cdb[1] = index;
970
971 /* IOA expects 64-bit pci address to be written in B.E format
 972 * (i.e. cdb[2]=MSByte..cdb[9]=LSByte).
973 */
974 pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb => %llx:%llx\n",
975 hrrq_addr, ioarcb->ioarcb_bus_addr);
976
977 memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
978 memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
979
980 /* Subsequent commands require HRRQ identification to be successful.
981 * Note that this gets called even during reset from SCSI mid-layer
982 * or tasklet
983 */
984 pmcraid_send_cmd(cmd, pmcraid_querycfg,
985 PMCRAID_INTERNAL_TIMEOUT,
986 pmcraid_timeout_handler);
987}
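The MSByte-first layout required in cdb[2..9] falls straight out of cpu_to_be64 followed by memcpy, independent of host endianness. A tiny self-contained sketch with a made-up address value:

	#include <linux/types.h>
	#include <linux/string.h>
	#include <asm/byteorder.h>

	static void pack_be64_into_cdb(u8 *cdb)
	{
		__be64 addr = cpu_to_be64(0x1122334455667788ULL); /* example */

		memcpy(&cdb[2], &addr, sizeof(addr));
		/* cdb[2] == 0x11 (MSByte) ... cdb[9] == 0x88 (LSByte),
		 * on both little- and big-endian hosts
		 */
	}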
988
989static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
990static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
991
992/**
 993 * pmcraid_send_hcam_cmd - send an initialized command block (HCAM) to IOA
994 *
995 * @cmd: initialized command block pointer
996 *
997 * Return Value
998 * none
999 */
1000static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
1001{
1002 if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
1003 atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
1004 else
1005 atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
1006
1007 pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
1008}
1009
1010/**
 1011 * pmcraid_init_hcam - initialize a command block for an HCAM to be sent to IOA
1012 *
1013 * @pinstance: pointer to adapter instance structure
1014 * @type: HCAM type
1015 *
1016 * Return Value
1017 * pointer to initialized pmcraid_cmd structure or NULL
1018 */
1019static struct pmcraid_cmd *pmcraid_init_hcam
1020(
1021 struct pmcraid_instance *pinstance,
1022 u8 type
1023)
1024{
1025 struct pmcraid_cmd *cmd;
1026 struct pmcraid_ioarcb *ioarcb;
1027 struct pmcraid_ioadl_desc *ioadl;
1028 struct pmcraid_hostrcb *hcam;
1029 void (*cmd_done) (struct pmcraid_cmd *);
1030 dma_addr_t dma;
1031 int rcb_size;
1032
1033 cmd = pmcraid_get_free_cmd(pinstance);
1034
1035 if (!cmd) {
1036 pmcraid_err("no free command blocks for hcam\n");
1037 return cmd;
1038 }
1039
1040 if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
1041 rcb_size = sizeof(struct pmcraid_hcam_ccn);
1042 cmd_done = pmcraid_process_ccn;
1043 dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
1044 hcam = &pinstance->ccn;
1045 } else {
1046 rcb_size = sizeof(struct pmcraid_hcam_ldn);
1047 cmd_done = pmcraid_process_ldn;
1048 dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE;
1049 hcam = &pinstance->ldn;
1050 }
1051
1052 /* initialize command pointer used for HCAM registration */
1053 hcam->cmd = cmd;
1054
1055 ioarcb = &cmd->ioa_cb->ioarcb;
1056 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
1057 offsetof(struct pmcraid_ioarcb,
1058 add_data.u.ioadl[0]));
1059 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
1060 ioadl = ioarcb->add_data.u.ioadl;
1061
1062 /* Initialize ioarcb */
1063 ioarcb->request_type = REQ_TYPE_HCAM;
1064 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
1065 ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC;
1066 ioarcb->cdb[1] = type;
1067 ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF;
1068 ioarcb->cdb[8] = (rcb_size) & 0xFF;
1069
1070 ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
1071
1072 ioadl[0].flags |= cpu_to_le32(IOADL_FLAGS_READ_LAST);
1073 ioadl[0].data_len = cpu_to_le32(rcb_size);
1074 ioadl[0].address = cpu_to_le32(dma);
1075
1076 cmd->cmd_done = cmd_done;
1077 return cmd;
1078}
1079
1080/**
1081 * pmcraid_send_hcam - Send an HCAM to IOA
1082 * @pinstance: ioa config struct
1083 * @type: HCAM type
1084 *
1085 * This function will send a Host Controlled Async command to IOA.
1086 *
1087 * Return value:
1088 * none
1089 */
1090static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
1091{
1092 struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type);
1093 pmcraid_send_hcam_cmd(cmd);
1094}
1095
1096
1097/**
1098 * pmcraid_prepare_cancel_cmd - prepares a command block to abort another
1099 *
1100 * @cmd: pointer to cmd that is used as cancelling command
1101 * @cmd_to_cancel: pointer to the command that needs to be cancelled
1102 */
1103static void pmcraid_prepare_cancel_cmd(
1104 struct pmcraid_cmd *cmd,
1105 struct pmcraid_cmd *cmd_to_cancel
1106)
1107{
1108 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
1109 __be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr;
1110
1111 /* Get the resource handle to where the command to be aborted has been
1112 * sent.
1113 */
1114 ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle;
1115 ioarcb->request_type = REQ_TYPE_IOACMD;
1116 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
1117 ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
1118
1119 /* IOARCB address of the command to be cancelled is given in
 1120 * cdb[2]..cdb[9] in Big-Endian format. Note that length bits in
1121 * IOARCB address are not masked.
1122 */
1123 ioarcb_addr = cpu_to_be64(ioarcb_addr);
1124 memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
1125}
1126
1127/**
1128 * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM
1129 *
1130 * @cmd: command to be used as cancelling command
1131 * @type: HCAM type
1132 * @cmd_done: op done function for the cancelling command
1133 */
1134static void pmcraid_cancel_hcam(
1135 struct pmcraid_cmd *cmd,
1136 u8 type,
1137 void (*cmd_done) (struct pmcraid_cmd *)
1138)
1139{
1140 struct pmcraid_instance *pinstance;
1141 struct pmcraid_hostrcb *hcam;
1142
1143 pinstance = cmd->drv_inst;
1144 hcam = (type == PMCRAID_HCAM_CODE_LOG_DATA) ?
1145 &pinstance->ldn : &pinstance->ccn;
1146
1147 /* prepare for cancelling previous hcam command. If the HCAM is
 1148 * currently pending with IOA, we would have hcam->cmd as non-null
1149 */
1150 if (hcam->cmd == NULL)
1151 return;
1152
1153 pmcraid_prepare_cancel_cmd(cmd, hcam->cmd);
1154
1155 /* writing to IOARRIN must be protected by host_lock, as mid-layer
 1156 * may schedule queuecommand while we are doing this
1157 */
1158 pmcraid_send_cmd(cmd, cmd_done,
1159 PMCRAID_INTERNAL_TIMEOUT,
1160 pmcraid_timeout_handler);
1161}
1162
1163/**
1164 * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA
1165 *
1166 * @cmd: command block to be used for cancelling the HCAM
1167 */
1168static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd)
1169{
1170 pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n",
1171 cmd->ioa_cb->ioarcb.cdb[0],
1172 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
1173
1174 pmcraid_reinit_cmdblk(cmd);
1175
1176 pmcraid_cancel_hcam(cmd,
1177 PMCRAID_HCAM_CODE_CONFIG_CHANGE,
1178 pmcraid_ioa_shutdown);
1179}
1180
1181/**
1182 * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA
1183 *
1184 * @cmd: command block to be used for cancelling the HCAM
1185 */
1186static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
1187{
1188 pmcraid_cancel_hcam(cmd,
1189 PMCRAID_HCAM_CODE_LOG_DATA,
1190 pmcraid_cancel_ccn);
1191}
1192
1193/**
1194 * pmcraid_expose_resource - check if the resource can be exposed to OS
1195 *
1196 * @cfgte: pointer to configuration table entry of the resource
1197 *
1198 * Return value:
1199 * true if resource can be added to midlayer, false(0) otherwise
1200 */
1201static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
1202{
1203 int retval = 0;
1204
1205 if (cfgte->resource_type == RES_TYPE_VSET)
1206 retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE);
1207 else if (cfgte->resource_type == RES_TYPE_GSCSI)
1208 retval = (RES_BUS(cfgte->resource_address) !=
1209 PMCRAID_VIRTUAL_ENCL_BUS_ID);
1210 return retval;
1211}
1212
1213/* attributes supported by pmcraid_event_family */
1214enum {
1215 PMCRAID_AEN_ATTR_UNSPEC,
1216 PMCRAID_AEN_ATTR_EVENT,
1217 __PMCRAID_AEN_ATTR_MAX,
1218};
1219#define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1)
1220
1221/* commands supported by pmcraid_event_family */
1222enum {
1223 PMCRAID_AEN_CMD_UNSPEC,
1224 PMCRAID_AEN_CMD_EVENT,
1225 __PMCRAID_AEN_CMD_MAX,
1226};
1227#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
1228
1229static struct genl_family pmcraid_event_family = {
1230 .id = GENL_ID_GENERATE,
1231 .name = "pmcraid",
1232 .version = 1,
1233 .maxattr = PMCRAID_AEN_ATTR_MAX
1234};
1235
1236/**
1237 * pmcraid_netlink_init - registers pmcraid_event_family
1238 *
1239 * Return value:
1240 * 0 if the pmcraid_event_family is successfully registered
1241 * with netlink generic, non-zero otherwise
1242 */
1243static int pmcraid_netlink_init(void)
1244{
1245 int result;
1246
1247 result = genl_register_family(&pmcraid_event_family);
1248
1249 if (result)
1250 return result;
1251
1252 pmcraid_info("registered NETLINK GENERIC group: %d\n",
1253 pmcraid_event_family.id);
1254
1255 return result;
1256}
1257
1258/**
1259 * pmcraid_netlink_release - unregisters pmcraid_event_family
1260 *
1261 * Return value:
1262 * none
1263 */
1264static void pmcraid_netlink_release(void)
1265{
1266 genl_unregister_family(&pmcraid_event_family);
1267}
1268
1269/**
1270 * pmcraid_notify_aen - sends event msg to user space application
1271 * @pinstance: pointer to adapter instance structure
1272 * @type: HCAM type
1273 *
1274 * Return value:
1275 * 0 if success, error value in case of any failure.
1276 */
1277static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
1278{
1279 struct sk_buff *skb;
1280 struct pmcraid_aen_msg *aen_msg;
1281 void *msg_header;
1282 int data_size, total_size;
1283 int result;
1284
1285
1286 if (type == PMCRAID_HCAM_CODE_LOG_DATA) {
1287 aen_msg = pinstance->ldn.msg;
1288 data_size = pinstance->ldn.hcam->data_len;
1289 } else {
1290 aen_msg = pinstance->ccn.msg;
1291 data_size = pinstance->ccn.hcam->data_len;
1292 }
1293
1294 data_size += sizeof(struct pmcraid_hcam_hdr);
1295 aen_msg->hostno = (pinstance->host->unique_id << 16 |
1296 MINOR(pinstance->cdev.dev));
1297 aen_msg->length = data_size;
1298 data_size += sizeof(*aen_msg);
1299
1300 total_size = nla_total_size(data_size);
1301 skb = genlmsg_new(total_size, GFP_ATOMIC);
1302
1303
1304 if (!skb) {
1305 pmcraid_err("Failed to allocate aen data SKB of size: %x\n",
1306 total_size);
1307 return -ENOMEM;
1308 }
1309
1310 /* add the genetlink message header */
1311 msg_header = genlmsg_put(skb, 0, 0,
1312 &pmcraid_event_family, 0,
1313 PMCRAID_AEN_CMD_EVENT);
1314 if (!msg_header) {
1315 pmcraid_err("failed to copy command details\n");
1316 nlmsg_free(skb);
1317 return -ENOMEM;
1318 }
1319
1320 result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
1321
1322 if (result) {
1323 pmcraid_err("failed to copy AEN attribute data\n");
1324 nlmsg_free(skb);
1325 return -EINVAL;
1326 }
1327
1328 /* send genetlink multicast message to notify applications */
1329 result = genlmsg_end(skb, msg_header);
1330
1331 if (result < 0) {
1332 pmcraid_err("genlmsg_end failed\n");
1333 nlmsg_free(skb);
1334 return result;
1335 }
1336
1337 result =
1338 genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC);
1339
1340 /* If there are no listeners, genlmsg_multicast may return a
1341 * non-zero value.
1342 */
1343 if (result)
1344 pmcraid_info("failed to send %s event message %x!\n",
1345 type == PMCRAID_HCAM_CODE_LOG_DATA ? "LDN" : "CCN",
1346 result);
1347 return result;
1348}
1349
1350/**
1351 * pmcraid_handle_config_change - Handle a config change from the adapter
1352 * @pinstance: pointer to per adapter instance structure
1353 *
1354 * Return value:
1355 * none
1356 */
1357static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1358{
1359 struct pmcraid_config_table_entry *cfg_entry;
1360 struct pmcraid_hcam_ccn *ccn_hcam;
1361 struct pmcraid_cmd *cmd;
1362 struct pmcraid_cmd *cfgcmd;
1363 struct pmcraid_resource_entry *res = NULL;
1364 u32 new_entry = 1;
1365 unsigned long lock_flags;
1366 unsigned long host_lock_flags;
1367 int rc;
1368
1369 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
1370 cfg_entry = &ccn_hcam->cfg_entry;
1371
1372 pmcraid_info
1373 ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n",
1374 pinstance->ccn.hcam->ilid,
1375 pinstance->ccn.hcam->op_code,
1376 pinstance->ccn.hcam->notification_type,
1377 pinstance->ccn.hcam->notification_lost,
1378 pinstance->ccn.hcam->flags,
1379 pinstance->host->unique_id,
1380 RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
1381 (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
1382 RES_BUS(cfg_entry->resource_address)),
1383 RES_IS_VSET(*cfg_entry) ? cfg_entry->unique_flags1 :
1384 RES_TARGET(cfg_entry->resource_address),
1385 RES_LUN(cfg_entry->resource_address));
1386
1387
1388 /* If this HCAM indicates a lost notification, read the config table */
1389 if (pinstance->ccn.hcam->notification_lost) {
1390 cfgcmd = pmcraid_get_free_cmd(pinstance);
1391 if (cfgcmd) {
1392 pmcraid_info("lost CCN, reading config table\n");
1393 pinstance->reinit_cfg_table = 1;
1394 pmcraid_querycfg(cfgcmd);
1395 } else {
1396 pmcraid_err("lost CCN, no free cmd for querycfg\n");
1397 }
1398 goto out_notify_apps;
1399 }
1400
1401 /* If this resource is not going to be added to mid-layer, just notify
1402 * applications and return
1403 */
1404 if (!pmcraid_expose_resource(cfg_entry))
1405 goto out_notify_apps;
1406
1407 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
1408 list_for_each_entry(res, &pinstance->used_res_q, queue) {
1409 rc = memcmp(&res->cfg_entry.resource_address,
1410 &cfg_entry->resource_address,
1411 sizeof(cfg_entry->resource_address));
1412 if (!rc) {
1413 new_entry = 0;
1414 break;
1415 }
1416 }
1417
1418 if (new_entry) {
1419
1420 /* If there are more resources than the driver can manage, do
1421 * not notify the applications about this CCN; just ignore the
1422 * notification and re-register the same HCAM
1423 */
1424 if (list_empty(&pinstance->free_res_q)) {
1425 spin_unlock_irqrestore(&pinstance->resource_lock,
1426 lock_flags);
1427 pmcraid_err("too many resources attached\n");
1428 spin_lock_irqsave(pinstance->host->host_lock,
1429 host_lock_flags);
1430 pmcraid_send_hcam(pinstance,
1431 PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1432 spin_unlock_irqrestore(pinstance->host->host_lock,
1433 host_lock_flags);
1434 return;
1435 }
1436
1437 res = list_entry(pinstance->free_res_q.next,
1438 struct pmcraid_resource_entry, queue);
1439
1440 list_del(&res->queue);
1441 res->scsi_dev = NULL;
1442 res->reset_progress = 0;
1443 list_add_tail(&res->queue, &pinstance->used_res_q);
1444 }
1445
1446 memcpy(&res->cfg_entry, cfg_entry,
1447 sizeof(struct pmcraid_config_table_entry));
1448
1449 if (pinstance->ccn.hcam->notification_type ==
1450 NOTIFICATION_TYPE_ENTRY_DELETED) {
1451 if (res->scsi_dev) {
1452 res->change_detected = RES_CHANGE_DEL;
1453 res->cfg_entry.resource_handle =
1454 PMCRAID_INVALID_RES_HANDLE;
1455 schedule_work(&pinstance->worker_q);
1456 } else {
1457 /* This may be one of the non-exposed resources */
1458 list_move_tail(&res->queue, &pinstance->free_res_q);
1459 }
1460 } else if (!res->scsi_dev) {
1461 res->change_detected = RES_CHANGE_ADD;
1462 schedule_work(&pinstance->worker_q);
1463 }
1464 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
1465
1466out_notify_apps:
1467
1468 /* Notify configuration changes to registered applications.*/
1469 if (!pmcraid_disable_aen)
1470 pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1471
1472 cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1473 if (cmd)
1474 pmcraid_send_hcam_cmd(cmd);
1475}
1476
1477/**
1478 * pmcraid_get_error_info - return error information for an ioasc
1479 * @ioasc: ioasc code
1480 * Return Value
1481 * pointer to the error table entry for 'ioasc', or NULL if none matches
1482 */
1483static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
1484{
1485 int i;
1486 for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) {
1487 if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc)
1488 return &pmcraid_ioasc_error_table[i];
1489 }
1490 return NULL;
1491}
1492
1493/**
1494 * pmcraid_ioasc_logger - log IOASC information based on user settings
1495 * @ioasc: ioasc code
1496 * @cmd: pointer to command that resulted in 'ioasc'
1497 */
1498void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
1499{
1500 struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
1501
1502 if (error_info == NULL ||
1503 cmd->drv_inst->current_log_level < error_info->log_level)
1504 return;
1505
1506 /* log the error string */
1507 pmcraid_err("cmd [%d] for resource %x failed with %x(%s)\n",
1508 cmd->ioa_cb->ioarcb.cdb[0],
1509 cmd->ioa_cb->ioarcb.resource_handle,
1510 le32_to_cpu(ioasc), error_info->error_string);
1511}
1512
1513/**
1514 * pmcraid_handle_error_log - Handle an error log notification from the IOA
1515 *
1516 * @pinstance: pointer to per adapter instance structure
1517 *
1518 * Return value:
1519 * none
1520 */
1521static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
1522{
1523 struct pmcraid_hcam_ldn *hcam_ldn;
1524 u32 ioasc;
1525
1526 hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
1527
1528 pmcraid_info
1529 ("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n",
1530 pinstance->ldn.hcam->ilid,
1531 pinstance->ldn.hcam->op_code,
1532 pinstance->ldn.hcam->notification_type,
1533 pinstance->ldn.hcam->notification_lost,
1534 pinstance->ldn.hcam->flags,
1535 pinstance->ldn.hcam->overlay_id);
1536
1537 /* log only the errors, no need to log informational log entries */
1538 if (pinstance->ldn.hcam->notification_type !=
1539 NOTIFICATION_TYPE_ERROR_LOG)
1540 return;
1541
1542 if (pinstance->ldn.hcam->notification_lost ==
1543 HOSTRCB_NOTIFICATIONS_LOST)
1544 dev_err(&pinstance->pdev->dev, "Error notifications lost\n");
1545
1546 ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
1547
1548 if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
1549 ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
1550 dev_err(&pinstance->pdev->dev,
1551 "UnitAttention due to IOA Bus Reset\n");
1552 scsi_report_bus_reset(
1553 pinstance->host,
1554 RES_BUS(hcam_ldn->error_log.fd_ra));
1555 }
1556
1557 return;
1558}
1559
1560/**
1561 * pmcraid_process_ccn - Op done function for a CCN.
1562 * @cmd: pointer to command struct
1563 *
1564 * This function is the op done function for a configuration
1565 * change notification
1566 *
1567 * Return value:
1568 * none
1569 */
1570static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
1571{
1572 struct pmcraid_instance *pinstance = cmd->drv_inst;
1573 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1574 unsigned long lock_flags;
1575
1576 pinstance->ccn.cmd = NULL;
1577 pmcraid_return_cmd(cmd);
1578
1579 /* If a driver-initiated IOA reset happened while this hcam was pending
1580 * with IOA, or IOA bringdown sequence is in progress, no need to
1581 * re-register the hcam
1582 */
1583 if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
1584 atomic_read(&pinstance->ccn.ignore) == 1) {
1585 return;
1586 } else if (ioasc) {
1587 dev_err(&pinstance->pdev->dev,
1588 "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
1589 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
1590 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1591 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
1592 } else {
1593 pmcraid_handle_config_change(pinstance);
1594 }
1595}
1596
1597/**
1598 * pmcraid_process_ldn - op done function for an LDN
1599 * @cmd: pointer to command block
1600 *
1601 * Return value
1602 * none
1603 */
1604static void pmcraid_initiate_reset(struct pmcraid_instance *);
1605
1606static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
1607{
1608 struct pmcraid_instance *pinstance = cmd->drv_inst;
1609 struct pmcraid_hcam_ldn *ldn_hcam =
1610 (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
1611 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1612 u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc);
1613 unsigned long lock_flags;
1614
1615 /* return the command block back to freepool */
1616 pinstance->ldn.cmd = NULL;
1617 pmcraid_return_cmd(cmd);
1618
1619 /* If a driver-initiated IOA reset happened while this hcam was pending
1620 * with IOA, no need to re-register the hcam as the reset engine will do it
1621 * once reset sequence is complete
1622 */
1623 if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
1624 atomic_read(&pinstance->ccn.ignore) == 1) {
1625 return;
1626 } else if (!ioasc) {
1627 pmcraid_handle_error_log(pinstance);
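 /* if the failing-device IOASC indicates that the IOA itself
  * requires a reset, initiate it now under host_lock
  */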
1628 if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) {
1629 spin_lock_irqsave(pinstance->host->host_lock,
1630 lock_flags);
1631 pmcraid_initiate_reset(pinstance);
1632 spin_unlock_irqrestore(pinstance->host->host_lock,
1633 lock_flags);
1634 return;
1635 }
1636 } else {
1637 dev_err(&pinstance->pdev->dev,
1638 "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
1639 }
1640 /* send netlink message for HCAM notification if enabled */
1641 if (!pmcraid_disable_aen)
1642 pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1643
1644 cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1645 if (cmd)
1646 pmcraid_send_hcam_cmd(cmd);
1647}
1648
1649/**
1650 * pmcraid_register_hcams - register HCAMs for CCN and LDN
1651 *
1652 * @pinstance: pointer to per-adapter instance structure
1653 *
1654 * Return Value
1655 * none
1656 */
1657static void pmcraid_register_hcams(struct pmcraid_instance *pinstance)
1658{
1659 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1660 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1661}
1662
1663/**
1664 * pmcraid_unregister_hcams - cancel HCAMs registered already
1665 * @cmd: pointer to command used as part of reset sequence
1666 */
1667static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
1668{
1669 struct pmcraid_instance *pinstance = cmd->drv_inst;
1670
1671 /* During IOA bringdown, HCAM gets fired and tasklet proceeds with
1672 * handling hcam response though it is not necessary. In order to
1673 * prevent this, set 'ignore', so that bring-down sequence doesn't
1674 * re-send any more hcams
1675 */
1676 atomic_set(&pinstance->ccn.ignore, 1);
1677 atomic_set(&pinstance->ldn.ignore, 1);
1678
1679 /* If adapter reset was forced as part of runtime reset sequence,
1680 * start the reset sequence.
1681 */
1682 if (pinstance->force_ioa_reset && !pinstance->ioa_bringdown) {
1683 pinstance->force_ioa_reset = 0;
1684 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1685 pmcraid_reset_alert(cmd);
1686 return;
1687 }
1688
1689 /* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM
1690 * one after the other. So CCN cancellation will be triggered by
1691 * pmcraid_cancel_ldn itself.
1692 */
1693 pmcraid_cancel_ldn(cmd);
1694}
1695
1696/**
1697 * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
1698 * @pinstance: pointer to adapter instance structure
1699 * Return Value
1700 * 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
1701 */
1702static void pmcraid_reinit_buffers(struct pmcraid_instance *);
1703
1704static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
1705{
1706 u32 intrs;
1707
1708 pmcraid_reinit_buffers(pinstance);
1709 intrs = pmcraid_read_interrupts(pinstance);
1710
1711 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
1712
1713 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
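 /* mask and acknowledge the pending transition-to-operational
  * interrupt; the reset engine drives the bring-up from here
  */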
1714 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1715 pinstance->int_regs.ioa_host_interrupt_mask_reg);
1716 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1717 pinstance->int_regs.ioa_host_interrupt_clr_reg);
1718 return 1;
1719 } else {
1720 return 0;
1721 }
1722}
1723
1724/**
1725 * pmcraid_soft_reset - performs a soft reset and makes IOA become ready
1726 * @cmd : pointer to reset command block
1727 *
1728 * Return Value
1729 * none
1730 */
1731static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
1732{
1733 struct pmcraid_instance *pinstance = cmd->drv_inst;
1734 u32 int_reg;
1735 u32 doorbell;
1736
1737 /* There will be an interrupt when the Transition to Operational bit
1738 * is set, so the tasklet will execute the next reset task; if it
1739 * never arrives, the timeout handler re-initiates the reset
1740 */
1741 cmd->cmd_done = pmcraid_ioa_reset;
1742 cmd->timer.data = (unsigned long)cmd;
1743 cmd->timer.expires = jiffies +
1744 msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
1745 cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
1746
1747 if (!timer_pending(&cmd->timer))
1748 add_timer(&cmd->timer);
1749
1750 /* Enable destructive diagnostics on IOA if it is not yet in
1751 * operational state
1752 */
1753 doorbell = DOORBELL_RUNTIME_RESET |
1754 DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
1755
1756 iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
1757 int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
1758 pmcraid_info("Waiting for IOA to become operational %x:%x\n",
1759 ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
1760 int_reg);
1761}
1762
1763/**
1764 * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt
1765 *
1766 * @pinstance: pointer to adapter instance structure
1767 *
1768 * Return Value
1769 * none
1770 */
1771static void pmcraid_get_dump(struct pmcraid_instance *pinstance)
1772{
1773 pmcraid_info("%s is not yet implemented\n", __func__);
1774}
1775
1776/**
1777 * pmcraid_fail_outstanding_cmds - Fails all outstanding ops.
1778 * @pinstance: pointer to adapter instance structure
1779 *
1780 * This function fails all outstanding ops. If they are submitted to IOA
1781 * already, it sends cancel all messages if IOA is still accepting IOARCBs,
1782 * otherwise just completes the commands and returns the cmd blocks to free
1783 * pool.
1784 *
1785 * Return value:
1786 * none
1787 */
1788static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
1789{
1790 struct pmcraid_cmd *cmd, *temp;
1791 unsigned long lock_flags;
1792
1793 /* pending command list is protected by pending_pool_lock; its
1794 * traversal must be done while holding this lock
1795 */
1796 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
1797 list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool,
1798 free_list) {
1799 list_del(&cmd->free_list);
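 /* drop the pool lock while completing this command; it is
  * re-acquired before continuing the traversal
  */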
1800 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
1801 lock_flags);
1802 cmd->ioa_cb->ioasa.ioasc =
1803 cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
1804 cmd->ioa_cb->ioasa.ilid =
1805 cpu_to_be32(PMCRAID_DRIVER_ILID);
1806
1807 /* In case the command timer is still running */
1808 del_timer(&cmd->timer);
1809
1810 /* If this is an IO command, complete it by invoking the
1811 * scsi_done function. If it is an internal command other than
1812 * pmcraid_ioa_reset or an HCAM command, invoke cmd_done to
1813 * complete it
1814 */
1815 if (cmd->scsi_cmd) {
1816
1817 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
1818 __le32 resp = cmd->ioa_cb->ioarcb.response_handle;
1819
1820 scsi_cmd->result |= DID_ERROR << 16;
1821
1822 scsi_dma_unmap(scsi_cmd);
1823 pmcraid_return_cmd(cmd);
1824
1825
1826 pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
1827 le32_to_cpu(resp) >> 2,
1828 cmd->ioa_cb->ioarcb.cdb[0],
1829 scsi_cmd->result);
1830 scsi_cmd->scsi_done(scsi_cmd);
1831 } else if (cmd->cmd_done == pmcraid_internal_done ||
1832 cmd->cmd_done == pmcraid_erp_done) {
1833 cmd->cmd_done(cmd);
1834 } else if (cmd->cmd_done != pmcraid_ioa_reset) {
1835 pmcraid_return_cmd(cmd);
1836 }
1837
1838 atomic_dec(&pinstance->outstanding_cmds);
1839 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
1840 }
1841
1842 spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
1843}
1844
1845/**
1846 * pmcraid_ioa_reset - Implementation of IOA reset logic
1847 *
1848 * @cmd: pointer to the cmd block to be used for entire reset process
1849 *
1850 * This function executes most of the steps required for IOA reset. This gets
1851 * called by user threads (modprobe/insmod/rmmod), timers, tasklets and the
1852 * mid-layer's 'eh_' thread. Access to variables used for controlling the reset sequence is
1853 * synchronized using host lock. Various functions called during reset process
1854 * would make use of a single command block, pointer to which is also stored in
1855 * adapter instance structure.
1856 *
1857 * Return Value
1858 * None
1859 */
1860static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
1861{
1862 struct pmcraid_instance *pinstance = cmd->drv_inst;
1863 u8 reset_complete = 0;
1864
1865 pinstance->ioa_reset_in_progress = 1;
1866
1867 if (pinstance->reset_cmd != cmd) {
1868 pmcraid_err("reset is called with different command block\n");
1869 pinstance->reset_cmd = cmd;
1870 }
1871
1872 pmcraid_info("reset_engine: state = %d, command = %p\n",
1873 pinstance->ioa_state, cmd);
1874
1875 switch (pinstance->ioa_state) {
1876
1877 case IOA_STATE_DEAD:
1878 /* If IOA is offline, whatever the reset reason may be, just
1879 * return; callers might be waiting on the reset wait_q, so wake
1880 * them up
1881 */
1882 pmcraid_err("IOA is offline, no reset is possible\n");
1883 reset_complete = 1;
1884 break;
1885
1886 case IOA_STATE_IN_BRINGDOWN:
1887 /* we enter here once the ioa shutdown command has been processed
1888 * by the IOA. Alert the IOA for a possible reset; if the reset
1889 * alert fails, the IOA goes through a hard reset
1890 */
1891 pmcraid_disable_interrupts(pinstance, ~0);
1892 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1893 pmcraid_reset_alert(cmd);
1894 break;
1895
1896 case IOA_STATE_UNKNOWN:
1897 /* We may be called during probe or resume. Some pre-processing
1898 * is required prior to the reset
1899 */
1900 scsi_block_requests(pinstance->host);
1901
1902 /* If asked to reset while IOA was processing responses or
1903 * there are any error responses then IOA may require
1904 * hard-reset.
1905 */
1906 if (pinstance->ioa_hard_reset == 0) {
1907 if (ioread32(pinstance->ioa_status) &
1908 INTRS_TRANSITION_TO_OPERATIONAL) {
1909 pmcraid_info("sticky bit set, bring-up\n");
1910 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
1911 pmcraid_reinit_cmdblk(cmd);
1912 pmcraid_identify_hrrq(cmd);
1913 } else {
1914 pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
1915 pmcraid_soft_reset(cmd);
1916 }
1917 } else {
1918 /* Alert IOA of a possible reset and wait for critical
1919 * operation in progress bit to reset
1920 */
1921 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1922 pmcraid_reset_alert(cmd);
1923 }
1924 break;
1925
1926 case IOA_STATE_IN_RESET_ALERT:
1927 /* If the critical operation in progress bit is reset or the wait
1928 * times out, the reset proceeds with starting BIST on the IOA.
1929 * The hard-reset path keeps a count of reset attempts; once they
1930 * reach 3 or more, the reset engine marks the IOA dead and returns
1931 */
1932 pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
1933 pmcraid_start_bist(cmd);
1934 break;
1935
1936 case IOA_STATE_IN_HARD_RESET:
1937 pinstance->ioa_reset_attempts++;
1938
1939 /* retry reset if we haven't reached maximum allowed limit */
1940 if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) {
1941 pinstance->ioa_reset_attempts = 0;
1942 pmcraid_err("IOA didn't respond, marking it as dead\n");
1943 pinstance->ioa_state = IOA_STATE_DEAD;
1944 reset_complete = 1;
1945 break;
1946 }
1947
1948 /* Once either bist or pci reset is done, restore PCI config
1949 * space. If this fails, proceed with hard reset again
1950 */
1951
1952 if (pci_restore_state(pinstance->pdev)) {
1953 pmcraid_info("config-space error resetting again\n");
1954 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1955 pmcraid_reset_alert(cmd);
1956 break;
1957 }
1958
1959 /* fail all pending commands */
1960 pmcraid_fail_outstanding_cmds(pinstance);
1961
1962 /* check if unit check is active, if so extract dump */
1963 if (pinstance->ioa_unit_check) {
1964 pmcraid_info("unit check is active\n");
1965 pinstance->ioa_unit_check = 0;
1966 pmcraid_get_dump(pinstance);
1967 pinstance->ioa_reset_attempts--;
1968 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1969 pmcraid_reset_alert(cmd);
1970 break;
1971 }
1972
1973 /* if the reset reason is to bring down the ioa, pci config
1974 * space has already been restored above, so just complete
1975 * the reset
1976 */
1977 if (pinstance->ioa_bringdown) {
1978 pmcraid_info("bringing down the adapter\n");
1979 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
1980 pinstance->ioa_bringdown = 0;
1981 pinstance->ioa_state = IOA_STATE_UNKNOWN;
1982 reset_complete = 1;
1983 } else {
1984 /* bring up the IOA, so proceed with a soft reset:
1985 * reinitialize hrrq_buffers and their indices, and also
1986 * enable interrupts after the pci_restore_state
1987 */
1988 if (pmcraid_reset_enable_ioa(pinstance)) {
1989 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
1990 pmcraid_info("bringing up the adapter\n");
1991 pmcraid_reinit_cmdblk(cmd);
1992 pmcraid_identify_hrrq(cmd);
1993 } else {
1994 pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
1995 pmcraid_soft_reset(cmd);
1996 }
1997 }
1998 break;
1999
2000 case IOA_STATE_IN_SOFT_RESET:
2001 /* TRANSITION TO OPERATIONAL is on so start initialization
2002 * sequence
2003 */
2004 pmcraid_info("In softreset proceeding with bring-up\n");
2005 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
2006
2007 /* Initialization commands start with HRRQ identification. From
2008 * now on tasklet completes most of the commands as IOA is up
2009 * and intrs are enabled
2010 */
2011 pmcraid_identify_hrrq(cmd);
2012 break;
2013
2014 case IOA_STATE_IN_BRINGUP:
2015 /* we are done with bringing up the IOA, change the ioa_state to
2016 * operational and wake up any waiters
2017 */
2018 pinstance->ioa_state = IOA_STATE_OPERATIONAL;
2019 reset_complete = 1;
2020 break;
2021
2022 case IOA_STATE_OPERATIONAL:
2023 default:
2024 /* When IOA is operational and a reset is requested, check for
2025 * the reset reason. If reset is to bring down IOA, unregister
2026 * HCAMs and initiate shutdown; if adapter reset is forced then
2027 * restart reset sequence again
2028 * restart the reset sequence
2029 if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
2030 pinstance->force_ioa_reset == 0) {
2031 reset_complete = 1;
2032 } else {
2033 if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
2034 pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN;
2035 pmcraid_reinit_cmdblk(cmd);
2036 pmcraid_unregister_hcams(cmd);
2037 }
2038 break;
2039 }
2040
2041 /* reset will be completed if ioa_state is either DEAD or UNKNOWN or
2042 * OPERATIONAL. Reset all control variables used during reset, wake up
2043 * any waiting threads and let the SCSI mid-layer send commands. Note
2044 * that host_lock must be held before invoking scsi_report_bus_reset.
2045 */
2046 if (reset_complete) {
2047 pinstance->ioa_reset_in_progress = 0;
2048 pinstance->ioa_reset_attempts = 0;
2049 pinstance->reset_cmd = NULL;
2050 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2051 pinstance->ioa_bringdown = 0;
2052 pmcraid_return_cmd(cmd);
2053
2054 /* If target state is to bring up the adapter, proceed with
2055 * hcam registration and resource exposure to mid-layer.
2056 */
2057 if (pinstance->ioa_state == IOA_STATE_OPERATIONAL)
2058 pmcraid_register_hcams(pinstance);
2059
2060 wake_up_all(&pinstance->reset_wait_q);
2061 }
2062
2063 return;
2064}
2065
2066/**
2067 * pmcraid_initiate_reset - initiates reset sequence. This is called from
2068 * ISR/tasklet during error interrupts including IOA unit check. If reset
2069 * is already in progress, it just returns, otherwise initiates IOA reset
2070 * to bring IOA up to operational state.
2071 *
2072 * @pinstance: pointer to adapter instance structure
2073 *
2074 * Return value
2075 * none
2076 */
2077static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
2078{
2079 struct pmcraid_cmd *cmd;
2080
2081 /* If the reset is already in progress, just return, otherwise start
2082 * reset sequence and return
2083 */
2084 if (!pinstance->ioa_reset_in_progress) {
2085 scsi_block_requests(pinstance->host);
2086 cmd = pmcraid_get_free_cmd(pinstance);
2087
2088 if (cmd == NULL) {
2089 pmcraid_err("no cmnd blocks for initiate_reset\n");
2090 return;
2091 }
2092
2093 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2094 pinstance->reset_cmd = cmd;
2095 pinstance->force_ioa_reset = 1;
2096 pmcraid_ioa_reset(cmd);
2097 }
2098}
2099
2100/**
2101 * pmcraid_reset_reload - utility routine for doing IOA reset either to bringup
2102 * or bringdown IOA
2103 * @pinstance: pointer adapter instance structure
2104 * @shutdown_type: shutdown type to be used: NONE, NORMAL or ABBREV
2105 * @target_state: expected target state after reset
2106 *
2107 * Note: This command initiates reset and waits for its completion. Hence this
2108 * should not be called from isr/timer/tasklet functions (timeout handlers,
2109 * error response handlers and interrupt handlers).
2110 *
2111 * Return Value
2112 * 1 in case ioa_state is not target_state, 0 otherwise.
2113 */
2114static int pmcraid_reset_reload(
2115 struct pmcraid_instance *pinstance,
2116 u8 shutdown_type,
2117 u8 target_state
2118)
2119{
2120 struct pmcraid_cmd *reset_cmd = NULL;
2121 unsigned long lock_flags;
2122 int reset = 1;
2123
2124 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2125
2126 if (pinstance->ioa_reset_in_progress) {
2127 pmcraid_info("reset_reload: reset is already in progress\n");
2128
2129 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2130
2131 wait_event(pinstance->reset_wait_q,
2132 !pinstance->ioa_reset_in_progress);
2133
2134 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2135
2136 if (pinstance->ioa_state == IOA_STATE_DEAD) {
2137 spin_unlock_irqrestore(pinstance->host->host_lock,
2138 lock_flags);
2139 pmcraid_info("reset_reload: IOA is dead\n");
2140 return reset;
2141 } else if (pinstance->ioa_state == target_state) {
2142 reset = 0;
2143 }
2144 }
2145
2146 if (reset) {
2147 pmcraid_info("reset_reload: proceeding with reset\n");
2148 scsi_block_requests(pinstance->host);
2149 reset_cmd = pmcraid_get_free_cmd(pinstance);
2150
2151 if (reset_cmd == NULL) {
2152 pmcraid_err("no free cmnd for reset_reload\n");
2153 spin_unlock_irqrestore(pinstance->host->host_lock,
2154 lock_flags);
2155 return reset;
2156 }
2157
2158 if (shutdown_type == SHUTDOWN_NORMAL)
2159 pinstance->ioa_bringdown = 1;
2160
2161 pinstance->ioa_shutdown_type = shutdown_type;
2162 pinstance->reset_cmd = reset_cmd;
2163 pinstance->force_ioa_reset = reset;
2164 pmcraid_info("reset_reload: initiating reset\n");
2165 pmcraid_ioa_reset(reset_cmd);
2166 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2167 pmcraid_info("reset_reload: waiting for reset to complete\n");
2168 wait_event(pinstance->reset_wait_q,
2169 !pinstance->ioa_reset_in_progress);
2170
2171 pmcraid_info("reset_reload: reset is complete\n");
2172 scsi_unblock_requests(pinstance->host);
2173 if (pinstance->ioa_state == target_state)
2174 reset = 0;
2175 }
2176
2177 return reset;
2178}
2179
2180/**
2181 * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA
2182 *
2183 * @pinstance: pointer to adapter instance structure
2184 *
2185 * Return Value
2186 * whatever is returned from pmcraid_reset_reload
2187 */
2188static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
2189{
2190 return pmcraid_reset_reload(pinstance,
2191 SHUTDOWN_NORMAL,
2192 IOA_STATE_UNKNOWN);
2193}
2194
2195/**
2196 * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA
2197 *
2198 * @pinstance: pointer to adapter instance structure
2199 *
2200 * Return Value
2201 * whatever is returned from pmcraid_reset_reload
2202 */
2203static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
2204{
2205 return pmcraid_reset_reload(pinstance,
2206 SHUTDOWN_NONE,
2207 IOA_STATE_OPERATIONAL);
2208}
2209
2210/**
2211 * pmcraid_request_sense - Send request sense to a device
2212 * @cmd: pmcraid command struct
2213 *
2214 * This function sends a request sense to a device as a result of a check
2215 * condition. This method re-uses the same command block that failed earlier.
2216 */
2217static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2218{
2219 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2220 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
2221
2222 /* allocate DMAable memory for sense buffers */
2223 cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
2224 SCSI_SENSE_BUFFERSIZE,
2225 &cmd->sense_buffer_dma);
2226
2227 if (cmd->sense_buffer == NULL) {
2228 pmcraid_err
2229 ("couldn't allocate sense buffer for request sense\n");
2230 pmcraid_erp_done(cmd);
2231 return;
2232 }
2233
2234 /* re-use the command block */
2235 memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
2236 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2237 ioarcb->request_flags0 = (SYNC_COMPLETE |
2238 NO_LINK_DESCS |
2239 INHIBIT_UL_CHECK);
2240 ioarcb->request_type = REQ_TYPE_SCSI;
2241 ioarcb->cdb[0] = REQUEST_SENSE;
2242 ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2243
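 /* a single IOADL descriptor, embedded in the IOARCB, describes
  * the sense buffer
  */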
2244 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
2245 offsetof(struct pmcraid_ioarcb,
2246 add_data.u.ioadl[0]));
2247 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
2248
2249 ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2250
2251 ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
2252 ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2253 ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
2254
2255 /* request sense might be called as part of error response processing,
2256 * which runs in tasklet context. It is possible that the mid-layer
2257 * schedules queuecommand during this time, hence writing to IOARRIN
2258 * must be protected by host_lock
2259 */
2260 pmcraid_send_cmd(cmd, pmcraid_erp_done,
2261 PMCRAID_REQUEST_SENSE_TIMEOUT,
2262 pmcraid_timeout_handler);
2263}
2264
2265/**
2266 * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
2267 * @cmd: command that failed
2268 * @sense: true if request_sense is required after cancel all
2269 *
2270 * This function sends a cancel all to a device to clear the queue.
2271 */
2272static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
2273{
2274 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2275 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2276 struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2277 void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
2278 : pmcraid_request_sense;
2279
2280 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2281 ioarcb->request_flags0 = SYNC_OVERRIDE;
2282 ioarcb->request_type = REQ_TYPE_IOACMD;
2283 ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS;
2284
2285 if (RES_IS_GSCSI(res->cfg_entry))
2286 ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL;
2287
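 /* cancel-all transfers no data; clear the IOADL fields and the
  * low-order IOARCB address bits used for embedded descriptors
  */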
2288 ioarcb->ioadl_bus_addr = 0;
2289 ioarcb->ioadl_length = 0;
2290 ioarcb->data_transfer_length = 0;
2291 ioarcb->ioarcb_bus_addr &= (~0x1FULL);
2292
2293 /* writing to IOARRIN must be protected by host_lock, as the
2294 * mid-layer may schedule queuecommand while we are doing this
2295 */
2296 pmcraid_send_cmd(cmd, cmd_done,
2297 PMCRAID_REQUEST_SENSE_TIMEOUT,
2298 pmcraid_timeout_handler);
2299}
2300
2301/**
2302 * pmcraid_frame_auto_sense - frame fixed format sense information
2303 *
2304 * @cmd: pointer to failing command block
2305 *
2306 * Return value
2307 * none
2308 */
2309static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd)
2310{
2311 u8 *sense_buf = cmd->scsi_cmd->sense_buffer;
2312 struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata;
2313 struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2314 u32 ioasc = le32_to_cpu(ioasa->ioasc);
2315 u32 failing_lba = 0;
2316
2317 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
2318 cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
2319
2320 if (RES_IS_VSET(res->cfg_entry) &&
2321 ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC &&
2322 ioasa->u.vset.failing_lba_hi != 0) {
2323
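 /* descriptor-format sense data (response code 0x72) with an
  * information descriptor carrying the 64-bit failing LBA
  */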
2324 sense_buf[0] = 0x72;
2325 sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc);
2326 sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc);
2327 sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
2328
2329 sense_buf[7] = 12;
2330 sense_buf[8] = 0;
2331 sense_buf[9] = 0x0A;
2332 sense_buf[10] = 0x80;
2333
2334 failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi);
2335
2336 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
2337 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
2338 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
2339 sense_buf[15] = failing_lba & 0x000000ff;
2340
2341 failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo);
2342
2343 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
2344 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
2345 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
2346 sense_buf[19] = failing_lba & 0x000000ff;
2347 } else {
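 /* fixed-format sense data (response code 0x70); the failing
  * LBA, if any, goes into the 4-byte information field
  */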
2348 sense_buf[0] = 0x70;
2349 sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc);
2350 sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc);
2351 sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
2352
2353 if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) {
2354 if (RES_IS_VSET(res->cfg_entry))
2355 failing_lba =
2356 le32_to_cpu(ioasa->u.
2357 vset.failing_lba_lo);
2358 sense_buf[0] |= 0x80;
2359 sense_buf[3] = (failing_lba >> 24) & 0xff;
2360 sense_buf[4] = (failing_lba >> 16) & 0xff;
2361 sense_buf[5] = (failing_lba >> 8) & 0xff;
2362 sense_buf[6] = failing_lba & 0xff;
2363 }
2364
2365 sense_buf[7] = 6; /* additional length */
2366 }
2367}
2368
2369/**
2370 * pmcraid_error_handler - Error response handlers for a SCSI op
2371 * @cmd: pointer to pmcraid_cmd that has failed
2372 *
2373 * This function determines whether or not to initiate ERP on the affected
2374 * device. This is called from a tasklet, which doesn't hold any locks.
2375 *
2376 * Return value:
2377 * 0 if the caller can complete the request; otherwise 1, in which
2378 * case the error handler itself completes the request and returns
2379 * the command block back to the free pool
2380 */
2381static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2382{
2383 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2384 struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2385 struct pmcraid_instance *pinstance = cmd->drv_inst;
2386 struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2387 u32 ioasc = le32_to_cpu(ioasa->ioasc);
2388 u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
2389 u32 sense_copied = 0;
2390
2391 if (!res) {
2392 pmcraid_info("resource pointer is NULL\n");
2393 return 0;
2394 }
2395
2396 /* If this was a SCSI read/write command keep count of errors */
2397 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
2398 atomic_inc(&res->read_failures);
2399 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
2400 atomic_inc(&res->write_failures);
2401
2402 if (!RES_IS_GSCSI(res->cfg_entry) &&
2403 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
2404 pmcraid_frame_auto_sense(cmd);
2405 }
2406
2407 /* Log IOASC/IOASA information based on user settings */
2408 pmcraid_ioasc_logger(ioasc, cmd);
2409
2410 switch (masked_ioasc) {
2411
2412 case PMCRAID_IOASC_AC_TERMINATED_BY_HOST:
2413 scsi_cmd->result |= (DID_ABORT << 16);
2414 break;
2415
2416 case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE:
2417 case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE:
2418 scsi_cmd->result |= (DID_NO_CONNECT << 16);
2419 break;
2420
2421 case PMCRAID_IOASC_NR_SYNC_REQUIRED:
2422 res->sync_reqd = 1;
2423 scsi_cmd->result |= (DID_IMM_RETRY << 16);
2424 break;
2425
2426 case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC:
2427 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
2428 break;
2429
2430 case PMCRAID_IOASC_UA_BUS_WAS_RESET:
2431 case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER:
2432 if (!res->reset_progress)
2433 scsi_report_bus_reset(pinstance->host,
2434 scsi_cmd->device->channel);
2435 scsi_cmd->result |= (DID_ERROR << 16);
2436 break;
2437
2438 case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR:
2439 scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc);
2440 res->sync_reqd = 1;
2441
2442 /* if check_condition is not active return with error otherwise
2443 * get/frame the sense buffer
2444 */
2445 if (PMCRAID_IOASC_SENSE_STATUS(ioasc) !=
2446 SAM_STAT_CHECK_CONDITION &&
2447 PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE)
2448 return 0;
2449
2450 /* If we have auto sense data as part of IOASA pass it to
2451 * mid-layer
2452 */
2453 if (ioasa->auto_sense_length != 0) {
2454 short sense_len = ioasa->auto_sense_length;
2455 int data_size = min_t(u16, le16_to_cpu(sense_len),
2456 SCSI_SENSE_BUFFERSIZE);
2457
2458 memcpy(scsi_cmd->sense_buffer,
2459 ioasa->sense_data,
2460 data_size);
2461 sense_copied = 1;
2462 }
2463
2464 if (RES_IS_GSCSI(res->cfg_entry)) {
2465 pmcraid_cancel_all(cmd, sense_copied);
2466 } else if (sense_copied) {
2467 pmcraid_erp_done(cmd);
2468 return 0;
2469 } else {
2470 pmcraid_request_sense(cmd);
2471 }
2472
2473 return 1;
2474
2475 case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED:
2476 break;
2477
2478 default:
2479 if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
2480 scsi_cmd->result |= (DID_ERROR << 16);
2481 break;
2482 }
2483 return 0;
2484}
2485
2486/**
2487 * pmcraid_reset_device - device reset handler function
2488 *
2489 * @scsi_cmd: scsi command struct
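 * @timeout: timeout to be used for the reset command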
2490 * @modifier: reset modifier indicating the reset sequence to be performed
2491 *
2492 * This function issues a device reset to the affected device.
2493 * A LUN reset will be sent to the device first. If that does
2494 * not work, a target reset will be sent.
2495 *
2496 * Return value:
2497 * SUCCESS / FAILED
2498 */
2499static int pmcraid_reset_device(
2500 struct scsi_cmnd *scsi_cmd,
2501 unsigned long timeout,
2502 u8 modifier
2503)
2504{
2505 struct pmcraid_cmd *cmd;
2506 struct pmcraid_instance *pinstance;
2507 struct pmcraid_resource_entry *res;
2508 struct pmcraid_ioarcb *ioarcb;
2509 unsigned long lock_flags;
2510 u32 ioasc;
2511
2512 pinstance =
2513 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
2514 res = scsi_cmd->device->hostdata;
2515
2516 if (!res) {
2517 pmcraid_err("reset_device: NULL resource pointer\n");
2518 return FAILED;
2519 }
2520
2521 /* If adapter is currently going through reset/reload, return failed.
2522 * This will force the mid-layer to call _eh_bus/host reset, which
2523 * will then go to sleep and wait for the reset to complete
2524 */
2525 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2526 if (pinstance->ioa_reset_in_progress ||
2527 pinstance->ioa_state == IOA_STATE_DEAD) {
2528 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2529 return FAILED;
2530 }
2531
2532 res->reset_progress = 1;
2533 pmcraid_info("Resetting %s resource with addr %x\n",
2534 ((modifier & RESET_DEVICE_LUN) ? "LUN" :
2535 ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")),
2536 le32_to_cpu(res->cfg_entry.resource_address));
2537
2538 /* get a free cmd block */
2539 cmd = pmcraid_get_free_cmd(pinstance);
2540
2541 if (cmd == NULL) {
2542 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2543 pmcraid_err("%s: no cmd blocks are available\n", __func__);
2544 return FAILED;
2545 }
2546
2547 ioarcb = &cmd->ioa_cb->ioarcb;
2548 ioarcb->resource_handle = res->cfg_entry.resource_handle;
2549 ioarcb->request_type = REQ_TYPE_IOACMD;
2550 ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
2551
2552 /* Initialize reset modifier bits */
2553 if (modifier)
2554 modifier = ENABLE_RESET_MODIFIER | modifier;
2555
2556 ioarcb->cdb[1] = modifier;
2557
2558 init_completion(&cmd->wait_for_completion);
2559 cmd->completion_req = 1;
2560
2561 pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n",
2562 cmd->ioa_cb->ioarcb.cdb[0],
2563 le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
2564 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
2565
2566 pmcraid_send_cmd(cmd,
2567 pmcraid_internal_done,
2568 timeout,
2569 pmcraid_timeout_handler);
2570
2571 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2572
2573 /* RESET_DEVICE command completes after all pending IOARCBs are
2574 * completed. Once this command is completed, pmcraid_internal_done
2575 * will wake up the 'completion' queue.
2576 */
2577 wait_for_completion(&cmd->wait_for_completion);
2578
2579 /* complete the command here itself and return the command block
2580 * to free list
2581 */
2582 pmcraid_return_cmd(cmd);
2583 res->reset_progress = 0;
2584 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
2585
2586 /* set the return value based on the returned ioasc */
2587 return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
2588}
2589
2590/**
2591 * _pmcraid_io_done - helper for pmcraid_io_done function
2592 *
2593 * @cmd: pointer to pmcraid command struct
2594 * @reslen: residual data length to be set in the ioasa
2595 * @ioasc: ioasc either returned by IOA or set by driver itself.
2596 *
2597 * This function is invoked by pmcraid_io_done to complete mid-layer
2598 * scsi ops.
2599 *
2600 * Return value:
2601 * 0 if caller is required to return it to free_pool. Returns 1 if
2602 * caller need not worry about freeing command block as error handler
2603 * will take care of that.
2604 */
2605
2606static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
2607{
2608 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2609 int rc = 0;
2610
2611 scsi_set_resid(scsi_cmd, reslen);
2612
2613 pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n",
2614 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
2615 cmd->ioa_cb->ioarcb.cdb[0],
2616 ioasc, scsi_cmd->result);
2617
2618 if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0)
2619 rc = pmcraid_error_handler(cmd);
2620
2621 if (rc == 0) {
2622 scsi_dma_unmap(scsi_cmd);
2623 scsi_cmd->scsi_done(scsi_cmd);
2624 }
2625
2626 return rc;
2627}
2628
2629/**
2630 * pmcraid_io_done - SCSI completion function
2631 *
2632 * @cmd: pointer to pmcraid command struct
2633 *
2634 * This function is invoked by the tasklet/mid-layer error handler to
2635 * complete the SCSI ops sent from the mid-layer.
2636 *
2637 * Return value
2638 * none
2639 */
2640
2641static void pmcraid_io_done(struct pmcraid_cmd *cmd)
2642{
2643 u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
2644 u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length);
2645
2646 if (_pmcraid_io_done(cmd, reslen, ioasc) == 0)
2647 pmcraid_return_cmd(cmd);
2648}
2649
2650/**
2651 * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA
2652 *
2653 * @cmd: command block of the command to be aborted
2654 *
2655 * Return Value:
2656 * returns pointer to the command structure used as the cancelling cmd, or NULL if no free command block is available
2657 */
2658static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
2659{
2660 struct pmcraid_cmd *cancel_cmd;
2661 struct pmcraid_instance *pinstance;
2662 struct pmcraid_resource_entry *res;
2663
2664 pinstance = (struct pmcraid_instance *)cmd->drv_inst;
2665 res = cmd->scsi_cmd->device->hostdata;
2666
2667 cancel_cmd = pmcraid_get_free_cmd(pinstance);
2668
2669 if (cancel_cmd == NULL) {
2670 pmcraid_err("%s: no cmd blocks are available\n", __func__);
2671 return NULL;
2672 }
2673
2674 pmcraid_prepare_cancel_cmd(cancel_cmd, cmd);
2675
2676 pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
2677 cmd->ioa_cb->ioarcb.cdb[0],
2678 cmd->ioa_cb->ioarcb.response_handle >> 2);
2679
2680 init_completion(&cancel_cmd->wait_for_completion);
2681 cancel_cmd->completion_req = 1;
2682
2683 pmcraid_info("command (%d) CDB[0] = %x for %x\n",
2684 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
2685 cmd->ioa_cb->ioarcb.cdb[0],
2686 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
2687
2688 pmcraid_send_cmd(cancel_cmd,
2689 pmcraid_internal_done,
2690 PMCRAID_INTERNAL_TIMEOUT,
2691 pmcraid_timeout_handler);
2692 return cancel_cmd;
2693}
2694
2695/**
2696 * pmcraid_abort_complete - Waits for ABORT TASK completion
2697 *
2698 * @cancel_cmd: command block used as the cancelling command
2699 *
2700 * Return Value:
2701 * returns SUCCESS if ABORT TASK has good completion
2702 * otherwise FAILED
2703 */
2704static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
2705{
2706 struct pmcraid_resource_entry *res;
2707 u32 ioasc;
2708
2709 wait_for_completion(&cancel_cmd->wait_for_completion);
2710 res = cancel_cmd->u.res;
2711 cancel_cmd->u.res = NULL;
2712 ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
2713
2714 /* If the abort task did not time out, we get a Good completion
2715 * as sense_key; otherwise we may get one of the following responses
2716 * due to a subsequent bus reset or device reset. In case IOASC is
2717 * NR_SYNC_REQUIRED, set the sync_reqd flag for the corresponding resource
2718 */
2719 if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
2720 ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) {
2721 if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED)
2722 res->sync_reqd = 1;
2723 ioasc = 0;
2724 }
2725
2726 /* complete the command here itself */
2727 pmcraid_return_cmd(cancel_cmd);
2728 return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
2729}
2730
2731/**
2732 * pmcraid_eh_abort_handler - entry point for aborting a single task on errors
2733 *
2734 * @scsi_cmd: scsi command struct given by mid-layer. When this is called
2735 * mid-layer ensures that no other commands are queued. This
2736 * never gets called under interrupt, but from a separate eh thread.
2737 *
2738 * Return value:
2739 * SUCCESS / FAILED
2740 */
2741static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
2742{
2743 struct pmcraid_instance *pinstance;
2744 struct pmcraid_cmd *cmd;
2745 struct pmcraid_resource_entry *res;
2746 unsigned long host_lock_flags;
2747 unsigned long pending_lock_flags;
2748 struct pmcraid_cmd *cancel_cmd = NULL;
2749 int cmd_found = 0;
2750 int rc = FAILED;
2751
2752 pinstance =
2753 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
2754
2755 dev_err(&pinstance->pdev->dev,
2756 "I/O command timed out, aborting it.\n");
2757
2758 res = scsi_cmd->device->hostdata;
2759
2760 if (res == NULL)
2761 return rc;
2762
2763 /* If we are currently going through reset/reload, return failed.
2764 * This will force the mid-layer to eventually call
2765 * pmcraid_eh_host_reset which will then go to sleep and wait for the
2766 * reset to complete
2767 */
2768 spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags);
2769
2770 if (pinstance->ioa_reset_in_progress ||
2771 pinstance->ioa_state == IOA_STATE_DEAD) {
2772 spin_unlock_irqrestore(pinstance->host->host_lock,
2773 host_lock_flags);
2774 return rc;
2775 }
2776
2777 /* loop over pending cmd list to find cmd corresponding to this
2778 * scsi_cmd. Note that this command might not have been completed
2779 * already. locking: all pending commands are protected with
2780 * pending_pool_lock.
2781 */
2782 spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags);
2783 list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) {
2784
2785 if (cmd->scsi_cmd == scsi_cmd) {
2786 cmd_found = 1;
2787 break;
2788 }
2789 }
2790
2791 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
2792 pending_lock_flags);
2793
2794 /* If the command to be aborted was given to IOA and still pending with
2795 * it, send ABORT_TASK to abort this and wait for its completion
2796 */
2797 if (cmd_found)
2798 cancel_cmd = pmcraid_abort_cmd(cmd);
2799
2800 spin_unlock_irqrestore(pinstance->host->host_lock,
2801 host_lock_flags);
2802
2803 if (cancel_cmd) {
2804 cancel_cmd->u.res = cmd->scsi_cmd->device->hostdata;
2805 rc = pmcraid_abort_complete(cancel_cmd);
2806 }
2807
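 /* if the command was never found pending with the IOA, it has
  * already completed, so report SUCCESS to the mid-layer
  */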
2808 return cmd_found ? rc : SUCCESS;
2809}
2810
2811/**
2812 * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
2813 *
2814 * @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
2815 *
2816 * All these routines invoke pmcraid_reset_device with appropriate parameters.
2817 * Since these are called from the mid-layer EH thread, no other IO will be
2818 * queued to the resource being reset. However, the control path (IOCTL) may be
2819 * active, so it is necessary to synchronize IOARRIN writes, which
2820 * pmcraid_reset_device takes care of by locking/unlocking host_lock.
2821 *
2822 * Return value
2823 * SUCCESS or FAILED
2824 */
2825static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
2826{
2827 pmcraid_err("Doing device reset due to an I/O command timeout.\n");
2828 return pmcraid_reset_device(scmd,
2829 PMCRAID_INTERNAL_TIMEOUT,
2830 RESET_DEVICE_LUN);
2831}
2832
2833static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
2834{
2835 pmcraid_err("Doing bus reset due to an I/O command timeout.\n");
2836 return pmcraid_reset_device(scmd,
2837 PMCRAID_RESET_BUS_TIMEOUT,
2838 RESET_DEVICE_BUS);
2839}
2840
2841static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
2842{
2843 pmcraid_err("Doing target reset due to an I/O command timeout.\n");
2844 return pmcraid_reset_device(scmd,
2845 PMCRAID_INTERNAL_TIMEOUT,
2846 RESET_DEVICE_TARGET);
2847}
2848
2849/**
2850 * pmcraid_eh_host_reset_handler - adapter reset handler callback
2851 *
2852 * @scmd: pointer to scsi_cmd that was sent to a resource of adapter
2853 *
2854 * Initiates adapter reset to bring it up to operational state
2855 *
2856 * Return value
2857 * SUCCESS or FAILED
2858 */
2859static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
2860{
2861 unsigned long interval = 10000; /* 10 seconds interval */
2862 int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval;
2863 struct pmcraid_instance *pinstance =
2864 (struct pmcraid_instance *)(scmd->device->host->hostdata);
2865
2866
2867 /* wait for an additional 150 seconds in case the firmware comes up
2868 * and manages to complete all the pending commands, excluding the
2869 * two HCAMs (CCN and LDN).
2870 */
2871 while (waits--) {
2872 if (atomic_read(&pinstance->outstanding_cmds) <=
2873 PMCRAID_MAX_HCAM_CMD)
2874 return SUCCESS;
2875 msleep(interval);
2876 }
2877
2878 dev_err(&pinstance->pdev->dev,
2879 "Adapter being reset due to an I/O command timeout.\n");
2880 return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED;
2881}
2882
2883/**
2884 * pmcraid_task_attributes - Translate SPI Q-Tags to task attributes
2885 * @scsi_cmd: scsi command struct
2886 *
2887 * Return value
2888 * task attribute (TASK_TAG_*) or 0 if the task is not tagged
2889 */
2890static u8 pmcraid_task_attributes(struct scsi_cmnd *scsi_cmd)
2891{
2892 char tag[2];
2893 u8 rc = 0;
2894
2895 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
2896 switch (tag[0]) {
2897 case MSG_SIMPLE_TAG:
2898 rc = TASK_TAG_SIMPLE;
2899 break;
2900 case MSG_HEAD_TAG:
2901 rc = TASK_TAG_QUEUE_HEAD;
2902 break;
2903 case MSG_ORDERED_TAG:
2904 rc = TASK_TAG_ORDERED;
2905 break;
2906 }
2907 }
2908
2909 return rc;
2910}
2911
2912
2913/**
2914 * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
2915 * @cmd: pmcraid command struct
2916 * @sgcount: count of scatter-gather elements
2917 *
2918 * Return value
2919 * returns pointer pmcraid_ioadl_desc, initialized to point to internal
2920 * or external IOADLs
2921 */
2922struct pmcraid_ioadl_desc *
2923pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
2924{
2925 struct pmcraid_ioadl_desc *ioadl;
2926 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2927 int ioadl_count = 0;
2928
2929 if (ioarcb->add_cmd_param_length)
2930 ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16);
2931 ioarcb->ioadl_length =
2932 sizeof(struct pmcraid_ioadl_desc) * sgcount;
2933
2934 if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
2935 /* external ioadls start at offset 0x80 from the control_block
2936 * structure, re-using 24 of the 27 ioadls that are part of the
2937 * IOARCB; indicate to the firmware that the driver's ioadls are
2938 * to be treated as external to the IOARCB.
2939 */
2940 ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
2941 ioarcb->ioadl_bus_addr =
2942 cpu_to_le64((cmd->ioa_cb_bus_addr) +
2943 offsetof(struct pmcraid_ioarcb,
2944 add_data.u.ioadl[3]));
2945 ioadl = &ioarcb->add_data.u.ioadl[3];
2946 } else {
2947 ioarcb->ioadl_bus_addr =
2948 cpu_to_le64((cmd->ioa_cb_bus_addr) +
2949 offsetof(struct pmcraid_ioarcb,
2950 add_data.u.ioadl[ioadl_count]));
2951
2952 ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
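 /* the low 5 bits of the IOARCB bus address carry the number of
  * embedded descriptor groups (of 8); the external-IOADL path
  * above clears them instead
  */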
2953 ioarcb->ioarcb_bus_addr |=
2954 DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8);
2955 }
2956
2957 return ioadl;
2958}
2959
2960/**
2961 * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer
2962 * @pinstance: pointer to adapter instance structure
2963 * @cmd: pmcraid command struct
2964 *
2965 * This function is invoked by queuecommand entry point while sending a command
2966 * to firmware. This builds ioadl descriptors and sets up ioarcb fields.
2967 *
2968 * Return value:
2969 * 0 on success or -1 on failure
2970 */
2971static int pmcraid_build_ioadl(
2972 struct pmcraid_instance *pinstance,
2973 struct pmcraid_cmd *cmd
2974)
2975{
2976 int i, nseg;
2977 struct scatterlist *sglist;
2978
2979 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2980 struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
2981 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
2982
2983 u32 length = scsi_bufflen(scsi_cmd);
2984
2985 if (!length)
2986 return 0;
2987
2988 nseg = scsi_dma_map(scsi_cmd);
2989
2990 if (nseg < 0) {
2991 dev_err(&pinstance->pdev->dev, "scsi_dma_map failed!\n");
2992 return -1;
2993 } else if (nseg > PMCRAID_MAX_IOADLS) {
2994 scsi_dma_unmap(scsi_cmd);
2995 dev_err(&pinstance->pdev->dev,
2996 "sg count is (%d) more than allowed!\n", nseg);
2997 return -1;
2998 }
2999
3000 /* Initialize IOARCB data transfer length fields */
3001 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
3002 ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
3003
3004 ioarcb->request_flags0 |= NO_LINK_DESCS;
3005 ioarcb->data_transfer_length = cpu_to_le32(length);
3006 ioadl = pmcraid_init_ioadls(cmd, nseg);
3007
3008 /* Initialize IOADL descriptor addresses */
3009 scsi_for_each_sg(scsi_cmd, sglist, nseg, i) {
3010 ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist));
3011 ioadl[i].address = cpu_to_le64(sg_dma_address(sglist));
3012 ioadl[i].flags = 0;
3013 }
3014 /* setup last descriptor */
3015 ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
3016
3017 return 0;
3018}
3019
3020/**
3021 * pmcraid_free_sglist - Frees an allocated SG buffer list
3022 * @sglist: scatter/gather list pointer
3023 *
3024 * Frees DMA'able memory previously allocated with pmcraid_alloc_sglist
3025 *
3026 * Return value:
3027 * none
3028 */
3029static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
3030{
3031 int i;
3032
3033 for (i = 0; i < sglist->num_sg; i++)
3034 __free_pages(sg_page(&(sglist->scatterlist[i])),
3035 sglist->order);
3036
3037 kfree(sglist);
3038}
3039
3040/**
3041 * pmcraid_alloc_sglist - Allocates memory for a SG list
3042 * @buflen: buffer length
3043 *
3044 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3045 * list.
3046 *
3047 * Return value
3048 * pointer to sglist / NULL on failure
3049 */
3050static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
3051{
3052 struct pmcraid_sglist *sglist;
3053 struct scatterlist *scatterlist;
3054 struct page *page;
3055 int num_elem, i, j;
3056 int sg_size;
3057 int order;
3058 int bsize_elem;
3059
3060 sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
3061 order = (sg_size > 0) ? get_order(sg_size) : 0;
3062 bsize_elem = PAGE_SIZE * (1 << order);
3063
3064 /* Determine the actual number of sg entries needed */
3065 if (buflen % bsize_elem)
3066 num_elem = (buflen / bsize_elem) + 1;
3067 else
3068 num_elem = buflen / bsize_elem;
3069
3070 /* Allocate a scatter/gather list for the DMA */
3071 sglist = kzalloc(sizeof(struct pmcraid_sglist) +
3072 (sizeof(struct scatterlist) * (num_elem - 1)),
3073 GFP_KERNEL);
3074
3075 if (sglist == NULL)
3076 return NULL;
3077
3078 scatterlist = sglist->scatterlist;
3079 sg_init_table(scatterlist, num_elem);
3080 sglist->order = order;
3081 sglist->num_sg = num_elem;
3082 sg_size = buflen;
3083
3084 for (i = 0; i < num_elem; i++) {
3085 page = alloc_pages(GFP_KERNEL|GFP_DMA, order);
3086 if (!page) {
3087 for (j = i - 1; j >= 0; j--)
3088 __free_pages(sg_page(&scatterlist[j]), order);
3089 kfree(sglist);
3090 return NULL;
3091 }
3092
3093 sg_set_page(&scatterlist[i], page,
3094 sg_size < bsize_elem ? sg_size : bsize_elem, 0);
3095 sg_size -= bsize_elem;
3096 }
3097
3098 return sglist;
3099}
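/*
 * Worked example for the sizing above (illustrative; assumes 4 KB pages and
 * the 27-descriptor IOADL limit suggested by the comments in this file):
 * a 1 MB buffer is split into at most PMCRAID_MAX_IOADLS - 1 chunks.
 */
#if 0
	int sg_size, order, bsize_elem, num_elem;

	sg_size    = 1048576 / 26;	   /* ~40 KB per chunk             */
	order      = get_order(sg_size);   /* rounds up to order 4 (64 KB) */
	bsize_elem = PAGE_SIZE << order;   /* 65536 bytes per sg element   */
	num_elem   = 1048576 / bsize_elem; /* 16 elements, no remainder    */
#endif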
3100
3101/**
3102 * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
3103 * @sglist: scatter/gather list pointer
3104 * @buffer: buffer pointer
3105 * @len: buffer length
3106 * @direction: data transfer direction
3107 *
3108 * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
3109 *
3110 * Return value:
3111 * 0 on success / other on failure
3112 */
3113static int pmcraid_copy_sglist(
3114 struct pmcraid_sglist *sglist,
3115 unsigned long buffer,
3116 u32 len,
3117 int direction
3118)
3119{
3120 struct scatterlist *scatterlist;
3121 void *kaddr;
3122 int bsize_elem;
3123 int i;
3124 int rc = 0;
3125
3126 /* Determine the actual number of bytes per element */
3127 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3128
3129 scatterlist = sglist->scatterlist;
3130
3131 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3132 struct page *page = sg_page(&scatterlist[i]);
3133
3134 kaddr = kmap(page);
3135 if (direction == DMA_TO_DEVICE)
3136			rc = __copy_from_user(kaddr,
3137					      (void __user *)buffer,
3138					      bsize_elem);
3139		else
3140			rc = __copy_to_user((void __user *)buffer, kaddr, bsize_elem);
3141
3142 kunmap(page);
3143
3144 if (rc) {
3145			pmcraid_err("failed to copy data between sg list and user buffer\n");
3146 return -EFAULT;
3147 }
3148
3149 scatterlist[i].length = bsize_elem;
3150 }
3151
3152 if (len % bsize_elem) {
3153 struct page *page = sg_page(&scatterlist[i]);
3154
3155 kaddr = kmap(page);
3156
3157 if (direction == DMA_TO_DEVICE)
3158			rc = __copy_from_user(kaddr,
3159					      (void __user *)buffer,
3160					      len % bsize_elem);
3161		else
3162			rc = __copy_to_user((void __user *)buffer,
3163					    kaddr,
3164					    len % bsize_elem);
3165
3166 kunmap(page);
3167
3168 scatterlist[i].length = len % bsize_elem;
3169 }
3170
3171 if (rc) {
3172		pmcraid_err("failed to copy data between sg list and user buffer\n");
3173 rc = -EFAULT;
3174 }
3175
3176 return rc;
3177}
3178
3179/**
3180 * pmcraid_queuecommand - Queue a mid-layer request
3181 * @scsi_cmd: scsi command struct
3182 * @done: done function
3183 *
3184 * This function queues a request generated by the mid-layer. Midlayer calls
3185 * this routine within host->lock. Some of the functions called by queuecommand
3186 * would use cmd block queue locks (free_pool_lock and pending_pool_lock)
3187 *
3188 * Return value:
3189 * 0 on success
3190 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3191 * SCSI_MLQUEUE_HOST_BUSY if host is busy
3192 */
3193static int pmcraid_queuecommand(
3194 struct scsi_cmnd *scsi_cmd,
3195 void (*done) (struct scsi_cmnd *)
3196)
3197{
3198 struct pmcraid_instance *pinstance;
3199 struct pmcraid_resource_entry *res;
3200 struct pmcraid_ioarcb *ioarcb;
3201 struct pmcraid_cmd *cmd;
3202 int rc = 0;
3203
3204 pinstance =
3205 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
3206
3207 scsi_cmd->scsi_done = done;
3208 res = scsi_cmd->device->hostdata;
3209 scsi_cmd->result = (DID_OK << 16);
3210
3211	/* if adapter is marked as dead, set result to DID_NO_CONNECT and
3212	 * complete the command
3213	 */
3214 if (pinstance->ioa_state == IOA_STATE_DEAD) {
3215 pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
3216 scsi_cmd->result = (DID_NO_CONNECT << 16);
3217 scsi_cmd->scsi_done(scsi_cmd);
3218 return 0;
3219 }
3220
3221 /* If IOA reset is in progress, can't queue the commands */
3222 if (pinstance->ioa_reset_in_progress)
3223 return SCSI_MLQUEUE_HOST_BUSY;
3224
3225 /* initialize the command and IOARCB to be sent to IOA */
3226 cmd = pmcraid_get_free_cmd(pinstance);
3227
3228 if (cmd == NULL) {
3229		pmcraid_err("no free command block is available\n");
3230 return SCSI_MLQUEUE_HOST_BUSY;
3231 }
3232
3233 cmd->scsi_cmd = scsi_cmd;
3234 ioarcb = &(cmd->ioa_cb->ioarcb);
3235 memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3236 ioarcb->resource_handle = res->cfg_entry.resource_handle;
3237 ioarcb->request_type = REQ_TYPE_SCSI;
3238
3239 cmd->cmd_done = pmcraid_io_done;
3240
3241 if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
3242 if (scsi_cmd->underflow == 0)
3243 ioarcb->request_flags0 |= INHIBIT_UL_CHECK;
3244
3245 if (res->sync_reqd) {
3246 ioarcb->request_flags0 |= SYNC_COMPLETE;
3247 res->sync_reqd = 0;
3248 }
3249
3250 ioarcb->request_flags0 |= NO_LINK_DESCS;
3251 ioarcb->request_flags1 |= pmcraid_task_attributes(scsi_cmd);
3252
3253 if (RES_IS_GSCSI(res->cfg_entry))
3254 ioarcb->request_flags1 |= DELAY_AFTER_RESET;
3255 }
3256
3257 rc = pmcraid_build_ioadl(pinstance, cmd);
3258
3259 pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n",
3260 le32_to_cpu(ioarcb->response_handle) >> 2,
3261 scsi_cmd->cmnd[0], pinstance->host->unique_id,
3262 RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
3263 PMCRAID_PHYS_BUS_ID,
3264 RES_IS_VSET(res->cfg_entry) ?
3265 res->cfg_entry.unique_flags1 :
3266 RES_TARGET(res->cfg_entry.resource_address),
3267 RES_LUN(res->cfg_entry.resource_address));
3268
3269 if (likely(rc == 0)) {
3270 _pmcraid_fire_command(cmd);
3271 } else {
3272 pmcraid_err("queuecommand could not build ioadl\n");
3273 pmcraid_return_cmd(cmd);
3274 rc = SCSI_MLQUEUE_HOST_BUSY;
3275 }
3276
3277 return rc;
3278}
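/*
 * Note on the addressing used in the trace above (a sketch, mirroring the
 * mapping used by pmcraid_worker_function() in this driver): volume sets
 * and physical devices are reported on different SCSI buses.
 */
#if 0
	if (RES_IS_VSET(res->cfg_entry)) {
		bus    = PMCRAID_VSET_BUS_ID;
		target = res->cfg_entry.unique_flags1;
		lun    = PMCRAID_VSET_LUN_ID;
	} else {
		bus    = PMCRAID_PHYS_BUS_ID;
		target = RES_TARGET(res->cfg_entry.resource_address);
		lun    = RES_LUN(res->cfg_entry.resource_address);
	}
#endif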
3279
3280/**
3281 * pmcraid_chr_open - char node "open" entry, allowed only to admin users
3282 */
3283static int pmcraid_chr_open(struct inode *inode, struct file *filep)
3284{
3285 struct pmcraid_instance *pinstance;
3286
3287 if (!capable(CAP_SYS_ADMIN))
3288 return -EACCES;
3289
3290	/* Populate the adapter instance pointer for use by ioctl */
3291 pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev);
3292 filep->private_data = pinstance;
3293
3294 return 0;
3295}
3296
3297/**
3298 * pmcraid_chr_release - char node "release" entry point
3299 */
3300static int pmcraid_chr_release(struct inode *inode, struct file *filep)
3301{
3302 struct pmcraid_instance *pinstance =
3303 ((struct pmcraid_instance *)filep->private_data);
3304
3305 filep->private_data = NULL;
3306 fasync_helper(-1, filep, 0, &pinstance->aen_queue);
3307
3308 return 0;
3309}
3310
3311/**
3312 * pmcraid_chr_fasync - Async notifier registration from applications
3313 *
3314 * This function adds the calling process to a driver global queue. When an
3315 * event occurs, SIGIO will be sent to all processes in this queue.
3316 */
3317static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
3318{
3319 struct pmcraid_instance *pinstance;
3320 int rc;
3321
3322 pinstance = (struct pmcraid_instance *)filep->private_data;
3323 mutex_lock(&pinstance->aen_queue_lock);
3324 rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
3325 mutex_unlock(&pinstance->aen_queue_lock);
3326
3327 return rc;
3328}
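/*
 * Matching user-space side, as a sketch (the device node name follows the
 * "pmcsas%u" pattern used by pmcraid_setup_chrdev(); aen_handler is an
 * application-defined assumption):
 */
#if 0
	#include <fcntl.h>
	#include <signal.h>
	#include <unistd.h>

	int fd = open("/dev/pmcsas0", O_RDONLY);

	signal(SIGIO, aen_handler);			 /* app-defined      */
	fcntl(fd, F_SETOWN, getpid());			 /* route SIGIO here */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC); /* calls ->fasync   */
#endif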
3329
3330
3331/**
3332 * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
3333 * commands sent over IOCTL interface
3334 *
3335 * @cmd : pointer to struct pmcraid_cmd
3336 * @buflen : length of the request buffer
3337 * @direction : data transfer direction
3338 *
3339 * Return value
3340 *   0 on success, non-zero error code on failure
3341 */
3342static int pmcraid_build_passthrough_ioadls(
3343 struct pmcraid_cmd *cmd,
3344 int buflen,
3345 int direction
3346)
3347{
3348 struct pmcraid_sglist *sglist = NULL;
3349 struct scatterlist *sg = NULL;
3350 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
3351 struct pmcraid_ioadl_desc *ioadl;
3352 int i;
3353
3354 sglist = pmcraid_alloc_sglist(buflen);
3355
3356 if (!sglist) {
3357		pmcraid_err("can't allocate memory for passthrough SG list\n");
3358 return -ENOMEM;
3359 }
3360
3361 sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
3362 sglist->scatterlist,
3363 sglist->num_sg, direction);
3364
3365 if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
3366 dev_err(&cmd->drv_inst->pdev->dev,
3367 "Failed to map passthrough buffer!\n");
3368 pmcraid_free_sglist(sglist);
3369 return -EIO;
3370 }
3371
3372 cmd->sglist = sglist;
3373 ioarcb->request_flags0 |= NO_LINK_DESCS;
3374
3375 ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
3376
3377 /* Initialize IOADL descriptor addresses */
3378 for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
3379 ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
3380 ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
3381 ioadl[i].flags = 0;
3382 }
3383
3384 /* setup the last descriptor */
3385 ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
3386
3387 return 0;
3388}
3389
3390
3391/**
3392 * pmcraid_release_passthrough_ioadls - release passthrough ioadls
3393 *
3394 * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
3395 * @buflen: size of the request buffer
3396 * @direction: data transfer direction
3397 *
3398 * Return value
3399 *   none
3400 */
3401static void pmcraid_release_passthrough_ioadls(
3402 struct pmcraid_cmd *cmd,
3403 int buflen,
3404 int direction
3405)
3406{
3407 struct pmcraid_sglist *sglist = cmd->sglist;
3408
3409 if (buflen > 0) {
3410 pci_unmap_sg(cmd->drv_inst->pdev,
3411 sglist->scatterlist,
3412 sglist->num_sg,
3413 direction);
3414 pmcraid_free_sglist(sglist);
3415 cmd->sglist = NULL;
3416 }
3417}
3418
3419/**
3420 * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
3421 *
3422 * @pinstance: pointer to adapter instance structure
3423 * @ioctl_cmd: ioctl code
3424 * @buflen: length of the request buffer
3425 * @arg: pointer to pmcraid_passthrough_buffer user buffer
3426 * Return value
3427 *   0 on success, non-zero error code on failure
3428 */
3429static long pmcraid_ioctl_passthrough(
3430 struct pmcraid_instance *pinstance,
3431 unsigned int ioctl_cmd,
3432 unsigned int buflen,
3433 unsigned long arg
3434)
3435{
3436 struct pmcraid_passthrough_ioctl_buffer *buffer;
3437 struct pmcraid_ioarcb *ioarcb;
3438 struct pmcraid_cmd *cmd;
3439 struct pmcraid_cmd *cancel_cmd;
3440 unsigned long request_buffer;
3441 unsigned long request_offset;
3442 unsigned long lock_flags;
3443 int request_size;
3444 int buffer_size;
3445 u8 access, direction;
3446 int rc = 0;
3447
3448 /* If IOA reset is in progress, wait 10 secs for reset to complete */
3449 if (pinstance->ioa_reset_in_progress) {
3450 rc = wait_event_interruptible_timeout(
3451 pinstance->reset_wait_q,
3452 !pinstance->ioa_reset_in_progress,
3453 msecs_to_jiffies(10000));
3454
3455 if (!rc)
3456 return -ETIMEDOUT;
3457 else if (rc < 0)
3458 return -ERESTARTSYS;
3459 }
3460
3461 /* If adapter is not in operational state, return error */
3462 if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
3463 pmcraid_err("IOA is not operational\n");
3464 return -ENOTTY;
3465 }
3466
3467 buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
3468 buffer = kmalloc(buffer_size, GFP_KERNEL);
3469
3470 if (!buffer) {
3471 pmcraid_err("no memory for passthrough buffer\n");
3472 return -ENOMEM;
3473 }
3474
3475 request_offset =
3476 offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
3477
3478 request_buffer = arg + request_offset;
3479
3480	rc = __copy_from_user(buffer,
3481			      (void __user *)arg,
3482			      sizeof(struct pmcraid_passthrough_ioctl_buffer));
3483 if (rc) {
3484 pmcraid_err("ioctl: can't copy passthrough buffer\n");
3485 rc = -EFAULT;
3486 goto out_free_buffer;
3487 }
3488
3489 request_size = buffer->ioarcb.data_transfer_length;
3490
3491 if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
3492 access = VERIFY_READ;
3493 direction = DMA_TO_DEVICE;
3494 } else {
3495 access = VERIFY_WRITE;
3496 direction = DMA_FROM_DEVICE;
3497 }
3498
3499 if (request_size > 0) {
3500 rc = access_ok(access, arg, request_offset + request_size);
3501
3502 if (!rc) {
3503 rc = -EFAULT;
3504 goto out_free_buffer;
3505 }
3506 }
3507
3508 /* check if we have any additional command parameters */
3509 if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) {
3510 rc = -EINVAL;
3511 goto out_free_buffer;
3512 }
3513
3514 cmd = pmcraid_get_free_cmd(pinstance);
3515
3516 if (!cmd) {
3517		pmcraid_err("no free command block is available\n");
3518 rc = -ENOMEM;
3519 goto out_free_buffer;
3520 }
3521
3522 cmd->scsi_cmd = NULL;
3523 ioarcb = &(cmd->ioa_cb->ioarcb);
3524
3525 /* Copy the user-provided IOARCB stuff field by field */
3526 ioarcb->resource_handle = buffer->ioarcb.resource_handle;
3527 ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
3528 ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
3529 ioarcb->request_type = buffer->ioarcb.request_type;
3530 ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
3531 ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
3532 memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
3533
3534 if (buffer->ioarcb.add_cmd_param_length) {
3535 ioarcb->add_cmd_param_length =
3536 buffer->ioarcb.add_cmd_param_length;
3537 ioarcb->add_cmd_param_offset =
3538 buffer->ioarcb.add_cmd_param_offset;
3539 memcpy(ioarcb->add_data.u.add_cmd_params,
3540 buffer->ioarcb.add_data.u.add_cmd_params,
3541 buffer->ioarcb.add_cmd_param_length);
3542 }
3543
3544 if (request_size) {
3545 rc = pmcraid_build_passthrough_ioadls(cmd,
3546 request_size,
3547 direction);
3548		if (rc) {
3549			pmcraid_err("couldn't build passthrough ioadls\n");
3550			pmcraid_return_cmd(cmd);
3551			goto out_free_buffer;
3552		}
3553	}
3554 /* If data is being written into the device, copy the data from user
3555 * buffers
3556 */
3557 if (direction == DMA_TO_DEVICE && request_size > 0) {
3558 rc = pmcraid_copy_sglist(cmd->sglist,
3559 request_buffer,
3560 request_size,
3561 direction);
3562 if (rc) {
3563 pmcraid_err("failed to copy user buffer\n");
3564 goto out_free_sglist;
3565 }
3566 }
3567
3568	/* passthrough ioctl is a blocking command, so put the caller to sleep
3569	 * until completion. Note that a timeout value of 0 means wait forever.
3570	 */
3571 cmd->cmd_done = pmcraid_internal_done;
3572 init_completion(&cmd->wait_for_completion);
3573 cmd->completion_req = 1;
3574
3575 pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
3576 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
3577 cmd->ioa_cb->ioarcb.cdb[0],
3578 le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
3579
3580 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3581 _pmcraid_fire_command(cmd);
3582 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3583
3584 /* If command timeout is specified put caller to wait till that time,
3585 * otherwise it would be blocking wait. If command gets timed out, it
3586 * will be aborted.
3587 */
3588 if (buffer->ioarcb.cmd_timeout == 0) {
3589 wait_for_completion(&cmd->wait_for_completion);
3590 } else if (!wait_for_completion_timeout(
3591 &cmd->wait_for_completion,
3592 msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
3593
3594 pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
3595			le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
3596 cmd->ioa_cb->ioarcb.cdb[0]);
3597
3598 rc = -ETIMEDOUT;
3599 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3600 cancel_cmd = pmcraid_abort_cmd(cmd);
3601 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3602
3603 if (cancel_cmd) {
3604 wait_for_completion(&cancel_cmd->wait_for_completion);
3605 pmcraid_return_cmd(cancel_cmd);
3606 }
3607
3608 goto out_free_sglist;
3609 }
3610
3611 /* If the command failed for any reason, copy entire IOASA buffer and
3612 * return IOCTL success. If copying IOASA to user-buffer fails, return
3613 * EFAULT
3614 */
3615 if (le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)) {
3616
3617 void *ioasa =
3618 (void *)(arg +
3619 offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
3620
3621 pmcraid_info("command failed with %x\n",
3622 le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
3623 if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
3624 sizeof(struct pmcraid_ioasa))) {
3625 pmcraid_err("failed to copy ioasa buffer to user\n");
3626 rc = -EFAULT;
3627 }
3628 }
3629 /* If the data transfer was from device, copy the data onto user
3630 * buffers
3631 */
3632 else if (direction == DMA_FROM_DEVICE && request_size > 0) {
3633 rc = pmcraid_copy_sglist(cmd->sglist,
3634 request_buffer,
3635 request_size,
3636 direction);
3637 if (rc) {
3638 pmcraid_err("failed to copy user buffer\n");
3639 rc = -EFAULT;
3640 }
3641 }
3642
3643out_free_sglist:
3644 pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
3645 pmcraid_return_cmd(cmd);
3646
3647out_free_buffer:
3648 kfree(buffer);
3649
3650 return rc;
3651}
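/*
 * Minimal user-space sketch of driving the passthrough path above. The
 * layout of struct pmcraid_passthrough_ioctl_buffer (ioctl header, IOASA,
 * IOARCB, then request data) and the field names are assumed from the way
 * this function parses the buffer; fd is an open management node.
 */
#if 0
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>

	struct pmcraid_passthrough_ioctl_buffer *buf;

	buf = calloc(1, sizeof(*buf) + data_len);
	memcpy(buf->ioctl_header.signature, PMCRAID_IOCTL_SIGNATURE,
	       sizeof(buf->ioctl_header.signature));
	buf->ioctl_header.buffer_length =	/* bytes after the header */
		sizeof(*buf) - sizeof(buf->ioctl_header) + data_len;
	buf->ioarcb.data_transfer_length = data_len;
	buf->ioarcb.cmd_timeout = 60;		/* seconds; 0 waits forever */
	/* fill buf->ioarcb.cdb[] and the trailing request data here */

	if (ioctl(fd, passthrough_ioctl_cmd, buf) < 0)
		perror("pmcraid passthrough");
#endif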
3652
3653
3654
3655
3656/**
3657 * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
3658 *
3659 * @pinstance: pointer to adapter instance structure
3660 * @cmd: ioctl command passed in
3661 * @buflen: length of user_buffer
3662 * @user_buffer: user buffer pointer
3663 *
3664 * Return Value
3665 * 0 in case of success, otherwise appropriate error code
3666 */
3667static long pmcraid_ioctl_driver(
3668 struct pmcraid_instance *pinstance,
3669 unsigned int cmd,
3670 unsigned int buflen,
3671 void __user *user_buffer
3672)
3673{
3674 int rc = -ENOSYS;
3675
3676 if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) {
3677		pmcraid_err("ioctl_driver: access fault in request buffer\n");
3678 return -EFAULT;
3679 }
3680
3681 switch (cmd) {
3682 case PMCRAID_IOCTL_RESET_ADAPTER:
3683 pmcraid_reset_bringup(pinstance);
3684 rc = 0;
3685 break;
3686
3687 default:
3688 break;
3689 }
3690
3691 return rc;
3692}
3693
3694/**
3695 * pmcraid_check_ioctl_buffer - check for proper access to user buffer
3696 *
3697 * @cmd: ioctl command
3698 * @arg: user buffer
3699 * @hdr: pointer to kernel memory for pmcraid_ioctl_header
3700 *
3701 * Return Value
3702 * negative error code if there are access issues, otherwise zero.
3703 * Upon success, the ioctl header is copied from the user buffer into @hdr.
3704 */
3705
3706static int pmcraid_check_ioctl_buffer(
3707 int cmd,
3708 void __user *arg,
3709 struct pmcraid_ioctl_header *hdr
3710)
3711{
3712 int rc = 0;
3713 int access = VERIFY_READ;
3714
3715 if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
3716 pmcraid_err("couldn't copy ioctl header from user buffer\n");
3717 return -EFAULT;
3718 }
3719
3720 /* check for valid driver signature */
3721 rc = memcmp(hdr->signature,
3722 PMCRAID_IOCTL_SIGNATURE,
3723 sizeof(hdr->signature));
3724 if (rc) {
3725 pmcraid_err("signature verification failed\n");
3726 return -EINVAL;
3727 }
3728
3729	/* buffer length can't be negative */
3730 if (hdr->buffer_length < 0) {
3731 pmcraid_err("ioctl: invalid buffer length specified\n");
3732 return -EINVAL;
3733 }
3734
3735 /* check for appropriate buffer access */
3736 if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
3737 access = VERIFY_WRITE;
3738
3739 rc = access_ok(access,
3740 (arg + sizeof(struct pmcraid_ioctl_header)),
3741 hdr->buffer_length);
3742 if (!rc) {
3743 pmcraid_err("access failed for user buffer of size %d\n",
3744 hdr->buffer_length);
3745 return -EFAULT;
3746 }
3747
3748 return 0;
3749}
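/*
 * Sketch of the header every caller must prepend to its ioctl payload
 * (illustrative; only the fields checked above are shown):
 */
#if 0
	struct pmcraid_ioctl_header hdr;

	memcpy(hdr.signature, PMCRAID_IOCTL_SIGNATURE, sizeof(hdr.signature));
	hdr.buffer_length = payload_len;   /* bytes that follow the header */
	/* user buffer layout: [pmcraid_ioctl_header][payload_len bytes]   */
#endif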
3750
3751/**
3752 * pmcraid_chr_ioctl - char node ioctl entry point
3753 */
3754static long pmcraid_chr_ioctl(
3755 struct file *filep,
3756 unsigned int cmd,
3757 unsigned long arg
3758)
3759{
3760 struct pmcraid_instance *pinstance = NULL;
3761 struct pmcraid_ioctl_header *hdr = NULL;
3762 int retval = -ENOTTY;
3763
3764	hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
3765
3766 if (!hdr) {
3767		pmcraid_err("failed to allocate memory for ioctl header\n");
3768 return -ENOMEM;
3769 }
3770
3771	retval = pmcraid_check_ioctl_buffer(cmd, (void __user *)arg, hdr);
3772
3773 if (retval) {
3774 pmcraid_info("chr_ioctl: header check failed\n");
3775 kfree(hdr);
3776 return retval;
3777 }
3778
3779 pinstance = (struct pmcraid_instance *)filep->private_data;
3780
3781 if (!pinstance) {
3782 pmcraid_info("adapter instance is not found\n");
3783 kfree(hdr);
3784 return -ENOTTY;
3785 }
3786
3787 switch (_IOC_TYPE(cmd)) {
3788
3789 case PMCRAID_PASSTHROUGH_IOCTL:
3790 /* If ioctl code is to download microcode, we need to block
3791 * mid-layer requests.
3792 */
3793 if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
3794 scsi_block_requests(pinstance->host);
3795
3796 retval = pmcraid_ioctl_passthrough(pinstance,
3797 cmd,
3798 hdr->buffer_length,
3799 arg);
3800
3801 if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
3802 scsi_unblock_requests(pinstance->host);
3803 break;
3804
3805 case PMCRAID_DRIVER_IOCTL:
3806 arg += sizeof(struct pmcraid_ioctl_header);
3807 retval = pmcraid_ioctl_driver(pinstance,
3808 cmd,
3809 hdr->buffer_length,
3810 (void __user *)arg);
3811 break;
3812
3813 default:
3814 retval = -ENOTTY;
3815 break;
3816 }
3817
3818 kfree(hdr);
3819
3820 return retval;
3821}
3822
3823/**
3824 * File operations structure for management interface
3825 */
3826static const struct file_operations pmcraid_fops = {
3827 .owner = THIS_MODULE,
3828 .open = pmcraid_chr_open,
3829 .release = pmcraid_chr_release,
3830 .fasync = pmcraid_chr_fasync,
3831 .unlocked_ioctl = pmcraid_chr_ioctl,
3832#ifdef CONFIG_COMPAT
3833 .compat_ioctl = pmcraid_chr_ioctl,
3834#endif
3835};
3836
3837
3838
3839
3840/**
3841 * pmcraid_show_log_level - Display adapter's error logging level
3842 * @dev: class device struct
3843 * @buf: buffer
3844 *
3845 * Return value:
3846 * number of bytes printed to buffer
3847 */
3848static ssize_t pmcraid_show_log_level(
3849 struct device *dev,
3850 struct device_attribute *attr,
3851 char *buf)
3852{
3853 struct Scsi_Host *shost = class_to_shost(dev);
3854 struct pmcraid_instance *pinstance =
3855 (struct pmcraid_instance *)shost->hostdata;
3856 return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
3857}
3858
3859/**
3860 * pmcraid_store_log_level - Change the adapter's error logging level
3861 * @dev: class device struct
3862 * @buf: buffer
3863 * @count: not used
3864 *
3865 * Return value:
3866 * number of bytes accepted from buffer on success, -EINVAL on bad input
3867 */
3868static ssize_t pmcraid_store_log_level(
3869 struct device *dev,
3870 struct device_attribute *attr,
3871 const char *buf,
3872 size_t count
3873)
3874{
3875 struct Scsi_Host *shost;
3876 struct pmcraid_instance *pinstance;
3877 unsigned long val;
3878
3879 if (strict_strtoul(buf, 10, &val))
3880 return -EINVAL;
3881 /* log-level should be from 0 to 2 */
3882 if (val > 2)
3883 return -EINVAL;
3884
3885 shost = class_to_shost(dev);
3886 pinstance = (struct pmcraid_instance *)shost->hostdata;
3887 pinstance->current_log_level = val;
3888
3889 return strlen(buf);
3890}
3891
3892static struct device_attribute pmcraid_log_level_attr = {
3893 .attr = {
3894 .name = "log_level",
3895 .mode = S_IRUGO | S_IWUSR,
3896 },
3897 .show = pmcraid_show_log_level,
3898 .store = pmcraid_store_log_level,
3899};
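/*
 * User-space view of the attribute pair above, as a sketch (the host number
 * in the sysfs path is an assumption):
 */
#if 0
	#include <stdio.h>

	FILE *f = fopen("/sys/class/scsi_host/host0/log_level", "w");

	if (f) {
		fputs("2", f);		/* store accepts values 0..2 */
		fclose(f);
	}
#endif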
3900
3901/**
3902 * pmcraid_show_drv_version - Display driver version
3903 * @dev: class device struct
3904 * @buf: buffer
3905 *
3906 * Return value:
3907 * number of bytes printed to buffer
3908 */
3909static ssize_t pmcraid_show_drv_version(
3910 struct device *dev,
3911 struct device_attribute *attr,
3912 char *buf
3913)
3914{
3915 return snprintf(buf, PAGE_SIZE, "version: %s, build date: %s\n",
3916 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
3917}
3918
3919static struct device_attribute pmcraid_driver_version_attr = {
3920 .attr = {
3921 .name = "drv_version",
3922 .mode = S_IRUGO,
3923 },
3924 .show = pmcraid_show_drv_version,
3925};
3926
3927/**
3928 * pmcraid_show_adapter_id - Display driver assigned adapter id
3929 * @dev: class device struct
3930 * @buf: buffer
3931 *
3932 * Return value:
3933 * number of bytes printed to buffer
3934 */
3935static ssize_t pmcraid_show_adapter_id(
3936 struct device *dev,
3937 struct device_attribute *attr,
3938 char *buf
3939)
3940{
3941 struct Scsi_Host *shost = class_to_shost(dev);
3942 struct pmcraid_instance *pinstance =
3943 (struct pmcraid_instance *)shost->hostdata;
3944 u32 adapter_id = (pinstance->pdev->bus->number << 8) |
3945 pinstance->pdev->devfn;
3946 u32 aen_group = pmcraid_event_family.id;
3947
3948 return snprintf(buf, PAGE_SIZE,
3949 "adapter id: %d\nminor: %d\naen group: %d\n",
3950 adapter_id, MINOR(pinstance->cdev.dev), aen_group);
3951}
3952
3953static struct device_attribute pmcraid_adapter_id_attr = {
3954 .attr = {
3955 .name = "adapter_id",
3956 .mode = S_IRUGO | S_IWUSR,
3957 },
3958 .show = pmcraid_show_adapter_id,
3959};
3960
3961static struct device_attribute *pmcraid_host_attrs[] = {
3962 &pmcraid_log_level_attr,
3963 &pmcraid_driver_version_attr,
3964 &pmcraid_adapter_id_attr,
3965 NULL,
3966};
3967
3968
3969/* host template structure for pmcraid driver */
3970static struct scsi_host_template pmcraid_host_template = {
3971 .module = THIS_MODULE,
3972 .name = PMCRAID_DRIVER_NAME,
3973 .queuecommand = pmcraid_queuecommand,
3974 .eh_abort_handler = pmcraid_eh_abort_handler,
3975 .eh_bus_reset_handler = pmcraid_eh_bus_reset_handler,
3976 .eh_target_reset_handler = pmcraid_eh_target_reset_handler,
3977 .eh_device_reset_handler = pmcraid_eh_device_reset_handler,
3978 .eh_host_reset_handler = pmcraid_eh_host_reset_handler,
3979
3980 .slave_alloc = pmcraid_slave_alloc,
3981 .slave_configure = pmcraid_slave_configure,
3982 .slave_destroy = pmcraid_slave_destroy,
3983 .change_queue_depth = pmcraid_change_queue_depth,
3984 .change_queue_type = pmcraid_change_queue_type,
3985 .can_queue = PMCRAID_MAX_IO_CMD,
3986 .this_id = -1,
3987 .sg_tablesize = PMCRAID_MAX_IOADLS,
3988 .max_sectors = PMCRAID_IOA_MAX_SECTORS,
3989 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
3990 .use_clustering = ENABLE_CLUSTERING,
3991 .shost_attrs = pmcraid_host_attrs,
3992 .proc_name = PMCRAID_DRIVER_NAME
3993};
3994
3995/**
3996 * pmcraid_isr_common - Common interrupt handler routine
3997 *
3998 * @pinstance: pointer to adapter instance
3999 * @intrs: active interrupts (contents of ioa_host_interrupt register)
4000 * @hrrq_id: Host RRQ index
4001 *
4002 * Return Value
4003 * none
4004 */
4005static void pmcraid_isr_common(
4006 struct pmcraid_instance *pinstance,
4007 u32 intrs,
4008 int hrrq_id
4009)
4010{
4011 u32 intrs_clear =
4012 (intrs & INTRS_CRITICAL_OP_IN_PROGRESS) ? intrs
4013 : INTRS_HRRQ_VALID;
4014 iowrite32(intrs_clear,
4015 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4016 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
4017
4018 /* hrrq valid bit was set, schedule tasklet to handle the response */
4019 if (intrs_clear == INTRS_HRRQ_VALID)
4020 tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
4021}
4022
4023/**
4024 * pmcraid_isr - implements interrupt handling routine
4025 *
4026 * @irq: interrupt vector number
4027 * @dev_id: pointer hrrq_vector
4028 *
4029 * Return Value
4030 * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
4031 */
4032static irqreturn_t pmcraid_isr(int irq, void *dev_id)
4033{
4034 struct pmcraid_isr_param *hrrq_vector;
4035 struct pmcraid_instance *pinstance;
4036 unsigned long lock_flags;
4037 u32 intrs;
4038
4039	/* In case of legacy interrupt mode where interrupts are shared across
4040	 * ISRs, it is possible that the current interrupt is not from the IOA
4041	 */
4042 if (!dev_id) {
4043 printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
4044 return IRQ_NONE;
4045 }
4046
4047 hrrq_vector = (struct pmcraid_isr_param *)dev_id;
4048 pinstance = hrrq_vector->drv_inst;
4049
4050 /* Acquire the lock (currently host_lock) while processing interrupts.
4051 * This interval is small as most of the response processing is done by
4052 * tasklet without the lock.
4053 */
4054 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
4055 intrs = pmcraid_read_interrupts(pinstance);
4056
4057 if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0)) {
4058 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
4059 return IRQ_NONE;
4060 }
4061
4062 /* Any error interrupts including unit_check, initiate IOA reset.
4063 * In case of unit check indicate to reset_sequence that IOA unit
4064 * checked and prepare for a dump during reset sequence
4065 */
4066 if (intrs & PMCRAID_ERROR_INTERRUPTS) {
4067
4068 if (intrs & INTRS_IOA_UNIT_CHECK)
4069 pinstance->ioa_unit_check = 1;
4070
4071 iowrite32(intrs,
4072 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4073 pmcraid_err("ISR: error interrupts: %x initiating reset\n",
4074 intrs);
4075 intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
4076 pmcraid_initiate_reset(pinstance);
4077 } else {
4078 pmcraid_isr_common(pinstance, intrs, hrrq_vector->hrrq_id);
4079 }
4080
4081 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
4082
4083 return IRQ_HANDLED;
4084}
4085
4086
4087/**
4088 * pmcraid_worker_function - worker thread function
4089 *
4090 * @workp: pointer to struct work queue
4091 *
4092 * Return Value
4093 * None
4094 */
4095
4096static void pmcraid_worker_function(struct work_struct *workp)
4097{
4098 struct pmcraid_instance *pinstance;
4099 struct pmcraid_resource_entry *res;
4100 struct pmcraid_resource_entry *temp;
4101 struct scsi_device *sdev;
4102 unsigned long lock_flags;
4103 unsigned long host_lock_flags;
4104 u8 bus, target, lun;
4105
4106 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
4107 /* add resources only after host is added into system */
4108 if (!atomic_read(&pinstance->expose_resources))
4109 return;
4110
4111 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
4112 list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
4113
4114 if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) {
4115 sdev = res->scsi_dev;
4116
4117 /* host_lock must be held before calling
4118 * scsi_device_get
4119 */
4120 spin_lock_irqsave(pinstance->host->host_lock,
4121 host_lock_flags);
4122 if (!scsi_device_get(sdev)) {
4123 spin_unlock_irqrestore(
4124 pinstance->host->host_lock,
4125 host_lock_flags);
4126 pmcraid_info("deleting %x from midlayer\n",
4127 res->cfg_entry.resource_address);
4128 list_move_tail(&res->queue,
4129 &pinstance->free_res_q);
4130 spin_unlock_irqrestore(
4131 &pinstance->resource_lock,
4132 lock_flags);
4133 scsi_remove_device(sdev);
4134 scsi_device_put(sdev);
4135 spin_lock_irqsave(&pinstance->resource_lock,
4136 lock_flags);
4137 res->change_detected = 0;
4138 } else {
4139 spin_unlock_irqrestore(
4140 pinstance->host->host_lock,
4141 host_lock_flags);
4142 }
4143 }
4144 }
4145
4146 list_for_each_entry(res, &pinstance->used_res_q, queue) {
4147
4148 if (res->change_detected == RES_CHANGE_ADD) {
4149
4150 if (!pmcraid_expose_resource(&res->cfg_entry))
4151 continue;
4152
4153 if (RES_IS_VSET(res->cfg_entry)) {
4154 bus = PMCRAID_VSET_BUS_ID;
4155 target = res->cfg_entry.unique_flags1;
4156 lun = PMCRAID_VSET_LUN_ID;
4157 } else {
4158 bus = PMCRAID_PHYS_BUS_ID;
4159 target =
4160 RES_TARGET(
4161 res->cfg_entry.resource_address);
4162 lun = RES_LUN(res->cfg_entry.resource_address);
4163 }
4164
4165 res->change_detected = 0;
4166 spin_unlock_irqrestore(&pinstance->resource_lock,
4167 lock_flags);
4168 scsi_add_device(pinstance->host, bus, target, lun);
4169 spin_lock_irqsave(&pinstance->resource_lock,
4170 lock_flags);
4171 }
4172 }
4173
4174 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
4175}
4176
4177/**
4178 * pmcraid_tasklet_function - Tasklet function
4179 *
4180 * @instance: pointer to hrrq vector parameter (struct pmcraid_isr_param)
4181 *
4182 * Return Value
4183 * None
4184 */
4185void pmcraid_tasklet_function(unsigned long instance)
4186{
4187 struct pmcraid_isr_param *hrrq_vector;
4188 struct pmcraid_instance *pinstance;
4189 unsigned long hrrq_lock_flags;
4190 unsigned long pending_lock_flags;
4191 unsigned long host_lock_flags;
4192 spinlock_t *lockp; /* hrrq buffer lock */
4193 int id;
4194 u32 intrs;
4195 __le32 resp;
4196
4197 hrrq_vector = (struct pmcraid_isr_param *)instance;
4198 pinstance = hrrq_vector->drv_inst;
4199 id = hrrq_vector->hrrq_id;
4200 lockp = &(pinstance->hrrq_lock[id]);
4201 intrs = pmcraid_read_interrupts(pinstance);
4202
4203	/* If the interrupt was raised as part of IOA initialization, clear
4204	 * and mask it. Delete the timer and wake up the reset engine to
4205	 * proceed with the reset sequence
4206	 */
4207 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
4208 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
4209 pinstance->int_regs.ioa_host_interrupt_mask_reg);
4210 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
4211 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4212
4213 if (pinstance->reset_cmd != NULL) {
4214 del_timer(&pinstance->reset_cmd->timer);
4215 spin_lock_irqsave(pinstance->host->host_lock,
4216 host_lock_flags);
4217 pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
4218 spin_unlock_irqrestore(pinstance->host->host_lock,
4219 host_lock_flags);
4220 }
4221 return;
4222 }
4223
4224 /* loop through each of the commands responded by IOA. Each HRRQ buf is
4225 * protected by its own lock. Traversals must be done within this lock
4226 * as there may be multiple tasklets running on multiple CPUs. Note
4227 * that the lock is held just for picking up the response handle and
4228 * manipulating hrrq_curr/toggle_bit values.
4229 */
4230 spin_lock_irqsave(lockp, hrrq_lock_flags);
4231
4232 resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4233
4234 while ((resp & HRRQ_TOGGLE_BIT) ==
4235 pinstance->host_toggle_bit[id]) {
4236
4237 int cmd_index = resp >> 2;
4238 struct pmcraid_cmd *cmd = NULL;
4239
4240 if (cmd_index < PMCRAID_MAX_CMD) {
4241 cmd = pinstance->cmd_list[cmd_index];
4242 } else {
4243 /* In case of invalid response handle, initiate IOA
4244 * reset sequence.
4245 */
4246 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4247
4248 pmcraid_err("Invalid response %d initiating reset\n",
4249 cmd_index);
4250
4251 spin_lock_irqsave(pinstance->host->host_lock,
4252 host_lock_flags);
4253 pmcraid_initiate_reset(pinstance);
4254 spin_unlock_irqrestore(pinstance->host->host_lock,
4255 host_lock_flags);
4256
4257 spin_lock_irqsave(lockp, hrrq_lock_flags);
4258 break;
4259 }
4260
4261 if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
4262 pinstance->hrrq_curr[id]++;
4263 } else {
4264 pinstance->hrrq_curr[id] = pinstance->hrrq_start[id];
4265 pinstance->host_toggle_bit[id] ^= 1u;
4266 }
4267
4268 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4269
4270 spin_lock_irqsave(&pinstance->pending_pool_lock,
4271 pending_lock_flags);
4272 list_del(&cmd->free_list);
4273 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
4274 pending_lock_flags);
4275 del_timer(&cmd->timer);
4276 atomic_dec(&pinstance->outstanding_cmds);
4277
4278 if (cmd->cmd_done == pmcraid_ioa_reset) {
4279 spin_lock_irqsave(pinstance->host->host_lock,
4280 host_lock_flags);
4281 cmd->cmd_done(cmd);
4282 spin_unlock_irqrestore(pinstance->host->host_lock,
4283 host_lock_flags);
4284 } else if (cmd->cmd_done != NULL) {
4285 cmd->cmd_done(cmd);
4286 }
4287		/* loop until we are done with all responses */
4288 spin_lock_irqsave(lockp, hrrq_lock_flags);
4289 resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4290 }
4291
4292 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4293}
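/*
 * Sketch of the HRRQ entry format consumed above: each 32-bit response
 * carries the command index in its upper bits and the producer's toggle
 * bit in bit 0, which must match host_toggle_bit for the entry to be new
 * (inferred from the arithmetic in this function).
 */
#if 0
	u32 resp      = le32_to_cpu(*pinstance->hrrq_curr[id]);
	int cmd_index = resp >> 2;			/* cmd_list[] index */
	int is_new    = (resp & HRRQ_TOGGLE_BIT) ==
			pinstance->host_toggle_bit[id];	/* else: stale */
#endif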
4294
4295/**
4296 * pmcraid_unregister_interrupt_handler - de-registers the interrupt handler
4297 * @pinstance: pointer to adapter instance structure
4298 *
4299 * This routine un-registers the registered interrupt handler and
4300 * frees the irqs/vectors.
4301 *
4302 * Return Value
4303 * None
4304 */
4305static
4306void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
4307{
4308 free_irq(pinstance->pdev->irq, &(pinstance->hrrq_vector[0]));
4309}
4310
4311/**
4312 * pmcraid_register_interrupt_handler - registers interrupt handler
4313 * @pinstance: pointer to per-adapter instance structure
4314 *
4315 * Return Value
4316 * 0 on success, non-zero error code otherwise.
4317 */
4318static int
4319pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
4320{
4321 struct pci_dev *pdev = pinstance->pdev;
4322
4323 pinstance->hrrq_vector[0].hrrq_id = 0;
4324 pinstance->hrrq_vector[0].drv_inst = pinstance;
4325 pinstance->hrrq_vector[0].vector = 0;
4326 pinstance->num_hrrq = 1;
4327 return request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
4328 PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
4329}
4330
4331/**
4332 * pmcraid_release_cmd_blocks - releases buffers allocated for command blocks
4333 * @pinstance: per adapter instance structure pointer
4334 * @max_index: number of buffer blocks to release
4335 *
4336 * Return Value
4337 * None
4338 */
4339static void
4340pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
4341{
4342 int i;
4343 for (i = 0; i < max_index; i++) {
4344 kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
4345 pinstance->cmd_list[i] = NULL;
4346 }
4347 kmem_cache_destroy(pinstance->cmd_cachep);
4348 pinstance->cmd_cachep = NULL;
4349}
4350
4351/**
4352 * pmcraid_release_control_blocks - releases buffers allocated for control blocks
4353 * @pinstance: pointer to per adapter instance structure
4354 * @max_index: number of buffers (from 0 onwards) to release
4355 *
4356 * This function assumes that the command blocks for which control blocks are
4357 * linked are not released.
4358 *
4359 * Return Value
4360 * None
4361 */
4362static void
4363pmcraid_release_control_blocks(
4364 struct pmcraid_instance *pinstance,
4365 int max_index
4366)
4367{
4368 int i;
4369
4370 if (pinstance->control_pool == NULL)
4371 return;
4372
4373 for (i = 0; i < max_index; i++) {
4374 pci_pool_free(pinstance->control_pool,
4375 pinstance->cmd_list[i]->ioa_cb,
4376 pinstance->cmd_list[i]->ioa_cb_bus_addr);
4377 pinstance->cmd_list[i]->ioa_cb = NULL;
4378 pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
4379 }
4380 pci_pool_destroy(pinstance->control_pool);
4381 pinstance->control_pool = NULL;
4382}
4383
4384/**
4385 * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
4386 * @pinstance: pointer to per adapter instance structure
4387 *
4388 * Allocates memory for command blocks using kernel slab allocator.
4389 *
4390 * Return Value
4391 * 0 in case of success; -ENOMEM in case of failure
4392 */
4393static int __devinit
4394pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
4395{
4396 int i;
4397
4398 sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
4399 pinstance->host->unique_id);
4400
4401
4402 pinstance->cmd_cachep = kmem_cache_create(
4403 pinstance->cmd_pool_name,
4404 sizeof(struct pmcraid_cmd), 0,
4405 SLAB_HWCACHE_ALIGN, NULL);
4406 if (!pinstance->cmd_cachep)
4407 return -ENOMEM;
4408
4409 for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4410 pinstance->cmd_list[i] =
4411 kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
4412 if (!pinstance->cmd_list[i]) {
4413 pmcraid_release_cmd_blocks(pinstance, i);
4414 return -ENOMEM;
4415 }
4416 }
4417 return 0;
4418}
4419
4420/**
4421 * pmcraid_allocate_control_blocks - allocates memory for control blocks
4422 * @pinstance: pointer to per adapter instance structure
4423 *
4424 * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
4425 * and IOASAs. This is called after command blocks are already allocated.
4426 *
4427 * Return Value
4428 * 0 in case it can allocate all control blocks, otherwise -ENOMEM
4429 */
4430static int __devinit
4431pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
4432{
4433 int i;
4434
4435 sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d",
4436 pinstance->host->unique_id);
4437
4438 pinstance->control_pool =
4439 pci_pool_create(pinstance->ctl_pool_name,
4440 pinstance->pdev,
4441 sizeof(struct pmcraid_control_block),
4442 PMCRAID_IOARCB_ALIGNMENT, 0);
4443
4444 if (!pinstance->control_pool)
4445 return -ENOMEM;
4446
4447 for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4448 pinstance->cmd_list[i]->ioa_cb =
4449 pci_pool_alloc(
4450 pinstance->control_pool,
4451 GFP_KERNEL,
4452 &(pinstance->cmd_list[i]->ioa_cb_bus_addr));
4453
4454 if (!pinstance->cmd_list[i]->ioa_cb) {
4455 pmcraid_release_control_blocks(pinstance, i);
4456 return -ENOMEM;
4457 }
4458 memset(pinstance->cmd_list[i]->ioa_cb, 0,
4459 sizeof(struct pmcraid_control_block));
4460 }
4461 return 0;
4462}
4463
4464/**
4465 * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s)
4466 * @pinstance: pointer to per adapter instance structure
4467 * @maxindex: size of hrrq buffer pointer array
4468 *
4469 * Return Value
4470 * None
4471 */
4472static void
4473pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
4474{
4475 int i;
4476 for (i = 0; i < maxindex; i++) {
4477
4478 pci_free_consistent(pinstance->pdev,
4479 HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
4480 pinstance->hrrq_start[i],
4481 pinstance->hrrq_start_bus_addr[i]);
4482
4483 /* reset pointers and toggle bit to zeros */
4484 pinstance->hrrq_start[i] = NULL;
4485 pinstance->hrrq_start_bus_addr[i] = 0;
4486 pinstance->host_toggle_bit[i] = 0;
4487 }
4488}
4489
4490/**
4491 * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers
4492 * @pinstance: pointer to per adapter instance structure
4493 *
4494 * Return value
4495 * 0 hrrq buffers are allocated, -ENOMEM otherwise.
4496 */
4497static int __devinit
4498pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4499{
4500 int i;
4501 int buf_count = PMCRAID_MAX_CMD / pinstance->num_hrrq;
4502
4503 for (i = 0; i < pinstance->num_hrrq; i++) {
4504 int buffer_size = HRRQ_ENTRY_SIZE * buf_count;
4505
4506 pinstance->hrrq_start[i] =
4507 pci_alloc_consistent(
4508 pinstance->pdev,
4509 buffer_size,
4510 &(pinstance->hrrq_start_bus_addr[i]));
4511
4512		if (pinstance->hrrq_start[i] == NULL) {
4513 pmcraid_err("could not allocate host rrq: %d\n", i);
4514 pmcraid_release_host_rrqs(pinstance, i);
4515 return -ENOMEM;
4516 }
4517
4518 memset(pinstance->hrrq_start[i], 0, buffer_size);
4519 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
4520 pinstance->hrrq_end[i] =
4521 pinstance->hrrq_start[i] + buf_count - 1;
4522 pinstance->host_toggle_bit[i] = 1;
4523 spin_lock_init(&pinstance->hrrq_lock[i]);
4524 }
4525 return 0;
4526}
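/*
 * Consumer view of one ring initialized above, as a sketch: hrrq_curr walks
 * from hrrq_start to hrrq_end and the toggle bit flips on every wrap, which
 * is how stale entries are told apart from new ones.
 */
#if 0
	if (pinstance->hrrq_curr[i] < pinstance->hrrq_end[i]) {
		pinstance->hrrq_curr[i]++;
	} else {
		pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
		pinstance->host_toggle_bit[i] ^= 1u;	/* wrapped around */
	}
#endif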
4527
4528/**
4529 * pmcraid_release_hcams - release HCAM buffers
4530 *
4531 * @pinstance: pointer to per adapter instance structure
4532 *
4533 * Return value
4534 * none
4535 */
4536static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4537{
4538 if (pinstance->ccn.msg != NULL) {
4539 pci_free_consistent(pinstance->pdev,
4540 PMCRAID_AEN_HDR_SIZE +
4541 sizeof(struct pmcraid_hcam_ccn),
4542 pinstance->ccn.msg,
4543 pinstance->ccn.baddr);
4544
4545 pinstance->ccn.msg = NULL;
4546 pinstance->ccn.hcam = NULL;
4547 pinstance->ccn.baddr = 0;
4548 }
4549
4550 if (pinstance->ldn.msg != NULL) {
4551 pci_free_consistent(pinstance->pdev,
4552 PMCRAID_AEN_HDR_SIZE +
4553 sizeof(struct pmcraid_hcam_ldn),
4554 pinstance->ldn.msg,
4555 pinstance->ldn.baddr);
4556
4557 pinstance->ldn.msg = NULL;
4558 pinstance->ldn.hcam = NULL;
4559 pinstance->ldn.baddr = 0;
4560 }
4561}
4562
4563/**
4564 * pmcraid_allocate_hcams - allocates HCAM buffers
4565 * @pinstance: pointer to per adapter instance structure
4566 *
4567 * Return Value:
4568 * 0 in case of successful allocation, non-zero otherwise
4569 */
4570static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
4571{
4572 pinstance->ccn.msg = pci_alloc_consistent(
4573 pinstance->pdev,
4574 PMCRAID_AEN_HDR_SIZE +
4575 sizeof(struct pmcraid_hcam_ccn),
4576 &(pinstance->ccn.baddr));
4577
4578 pinstance->ldn.msg = pci_alloc_consistent(
4579 pinstance->pdev,
4580 PMCRAID_AEN_HDR_SIZE +
4581 sizeof(struct pmcraid_hcam_ldn),
4582 &(pinstance->ldn.baddr));
4583
4584 if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
4585 pmcraid_release_hcams(pinstance);
4586 } else {
4587 pinstance->ccn.hcam =
4588 (void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE;
4589 pinstance->ldn.hcam =
4590 (void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE;
4591
4592 atomic_set(&pinstance->ccn.ignore, 0);
4593 atomic_set(&pinstance->ldn.ignore, 0);
4594 }
4595
4596 return (pinstance->ldn.msg == NULL) ? -ENOMEM : 0;
4597}
4598
4599/**
4600 * pmcraid_release_config_buffers - release config table buffers
4601 * @pinstance: pointer to per adapter instance structure
4602 *
4603 * Return Value
4604 * none
4605 */
4606static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
4607{
4608 if (pinstance->cfg_table != NULL &&
4609 pinstance->cfg_table_bus_addr != 0) {
4610 pci_free_consistent(pinstance->pdev,
4611 sizeof(struct pmcraid_config_table),
4612 pinstance->cfg_table,
4613 pinstance->cfg_table_bus_addr);
4614 pinstance->cfg_table = NULL;
4615 pinstance->cfg_table_bus_addr = 0;
4616 }
4617
4618 if (pinstance->res_entries != NULL) {
4619 int i;
4620
4621 for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
4622 list_del(&pinstance->res_entries[i].queue);
4623 kfree(pinstance->res_entries);
4624 pinstance->res_entries = NULL;
4625 }
4626
4627 pmcraid_release_hcams(pinstance);
4628}
4629
4630/**
4631 * pmcraid_allocate_config_buffers - allocates DMAable memory for config table
4632 * @pinstance: pointer to per adapter instance structure
4633 *
4634 * Return Value
4635 * 0 for successful allocation, -ENOMEM for any failure
4636 */
4637static int __devinit
4638pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
4639{
4640 int i;
4641
4642 pinstance->res_entries =
4643 kzalloc(sizeof(struct pmcraid_resource_entry) *
4644 PMCRAID_MAX_RESOURCES, GFP_KERNEL);
4645
4646 if (NULL == pinstance->res_entries) {
4647 pmcraid_err("failed to allocate memory for resource table\n");
4648 return -ENOMEM;
4649 }
4650
4651 for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
4652 list_add_tail(&pinstance->res_entries[i].queue,
4653 &pinstance->free_res_q);
4654
4655 pinstance->cfg_table =
4656 pci_alloc_consistent(pinstance->pdev,
4657 sizeof(struct pmcraid_config_table),
4658 &pinstance->cfg_table_bus_addr);
4659
4660 if (NULL == pinstance->cfg_table) {
4661 pmcraid_err("couldn't alloc DMA memory for config table\n");
4662 pmcraid_release_config_buffers(pinstance);
4663 return -ENOMEM;
4664 }
4665
4666 if (pmcraid_allocate_hcams(pinstance)) {
4667 pmcraid_err("could not alloc DMA memory for HCAMS\n");
4668 pmcraid_release_config_buffers(pinstance);
4669 return -ENOMEM;
4670 }
4671
4672 return 0;
4673}
4674
4675/**
4676 * pmcraid_init_tasklets - registers tasklets for response handling
4677 *
4678 * @pinstance: pointer adapter instance structure
4679 *
4680 * Return value
4681 * none
4682 */
4683static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance)
4684{
4685 int i;
4686 for (i = 0; i < pinstance->num_hrrq; i++)
4687 tasklet_init(&pinstance->isr_tasklet[i],
4688 pmcraid_tasklet_function,
4689 (unsigned long)&pinstance->hrrq_vector[i]);
4690}
4691
4692/**
4693 * pmcraid_kill_tasklets - destroys tasklets registered for response handling
4694 *
4695 * @pinstance: pointer to adapter instance structure
4696 *
4697 * Return value
4698 * none
4699 */
4700static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
4701{
4702 int i;
4703 for (i = 0; i < pinstance->num_hrrq; i++)
4704 tasklet_kill(&pinstance->isr_tasklet[i]);
4705}
4706
4707/**
4708 * pmcraid_init_buffers - allocates memory and initializes various structures
4709 * @pinstance: pointer to per adapter instance structure
4710 *
4711 * This routine pre-allocates memory based on the type of block as below:
4712 * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
4713 * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator
4714 * config-table entries : DMAable memory using pci_alloc_consistent
4715 * HostRRQs : DMAable memory, using pci_alloc_consistent
4716 *
4717 * Return Value
4718 * 0 in case all of the blocks are allocated, -ENOMEM otherwise.
4719 */
4720static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
4721{
4722 int i;
4723
4724 if (pmcraid_allocate_host_rrqs(pinstance)) {
4725 pmcraid_err("couldn't allocate memory for %d host rrqs\n",
4726 pinstance->num_hrrq);
4727 return -ENOMEM;
4728 }
4729
4730 if (pmcraid_allocate_config_buffers(pinstance)) {
4731 pmcraid_err("couldn't allocate memory for config buffers\n");
4732 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4733 return -ENOMEM;
4734 }
4735
4736 if (pmcraid_allocate_cmd_blocks(pinstance)) {
4737		pmcraid_err("couldn't allocate memory for cmd blocks\n");
4738 pmcraid_release_config_buffers(pinstance);
4739 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4740 return -ENOMEM;
4741 }
4742
4743 if (pmcraid_allocate_control_blocks(pinstance)) {
4744		pmcraid_err("couldn't allocate memory for control blocks\n");
4745 pmcraid_release_config_buffers(pinstance);
4746 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
4747 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4748 return -ENOMEM;
4749 }
4750
4751 /* Initialize all the command blocks and add them to free pool. No
4752 * need to lock (free_pool_lock) as this is done in initialization
4753 * itself
4754 */
4755 for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4756 struct pmcraid_cmd *cmdp = pinstance->cmd_list[i];
4757 pmcraid_init_cmdblk(cmdp, i);
4758 cmdp->drv_inst = pinstance;
4759 list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool);
4760 }
4761
4762 return 0;
4763}
4764
4765/**
4766 * pmcraid_reinit_buffers - resets various buffer pointers
4767 * @pinstance: pointer to adapter instance
4768 * Return value
4769 * none
4770 */
4771static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
4772{
4773 int i;
4774 int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
4775
4776 for (i = 0; i < pinstance->num_hrrq; i++) {
4777 memset(pinstance->hrrq_start[i], 0, buffer_size);
4778 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
4779 pinstance->hrrq_end[i] =
4780 pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
4781 pinstance->host_toggle_bit[i] = 1;
4782 }
4783}
4784
4785/**
4786 * pmcraid_init_instance - initialize per instance data structure
4787 * @pdev: pointer to pci device structure
4788 * @host: pointer to Scsi_Host structure
4789 * @mapped_pci_addr: memory mapped IOA configuration registers
4790 *
4791 * Return Value
4792 * 0 on success, non-zero in case of any failure
4793 */
4794static int __devinit pmcraid_init_instance(
4795 struct pci_dev *pdev,
4796 struct Scsi_Host *host,
4797 void __iomem *mapped_pci_addr
4798)
4799{
4800 struct pmcraid_instance *pinstance =
4801 (struct pmcraid_instance *)host->hostdata;
4802
4803 pinstance->host = host;
4804 pinstance->pdev = pdev;
4805
4806 /* Initialize register addresses */
4807 pinstance->mapped_dma_addr = mapped_pci_addr;
4808
4809 /* Initialize chip-specific details */
4810 {
4811 struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg;
4812 struct pmcraid_interrupts *pint_regs = &pinstance->int_regs;
4813
4814 pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin;
4815
4816 pint_regs->ioa_host_interrupt_reg =
4817 mapped_pci_addr + chip_cfg->ioa_host_intr;
4818 pint_regs->ioa_host_interrupt_clr_reg =
4819 mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
4820 pint_regs->host_ioa_interrupt_reg =
4821 mapped_pci_addr + chip_cfg->host_ioa_intr;
4822 pint_regs->host_ioa_interrupt_clr_reg =
4823 mapped_pci_addr + chip_cfg->host_ioa_intr_clr;
4824
4825 /* Current version of firmware exposes interrupt mask set
4826 * and mask clr registers through memory mapped bar0.
4827 */
4828 pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox;
4829 pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus;
4830 pint_regs->ioa_host_interrupt_mask_reg =
4831 mapped_pci_addr + chip_cfg->ioa_host_mask;
4832 pint_regs->ioa_host_interrupt_mask_clr_reg =
4833 mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
4834 pint_regs->global_interrupt_mask_reg =
4835 mapped_pci_addr + chip_cfg->global_intr_mask;
4836	}
4837
4838 pinstance->ioa_reset_attempts = 0;
4839 init_waitqueue_head(&pinstance->reset_wait_q);
4840
4841 atomic_set(&pinstance->outstanding_cmds, 0);
4842 atomic_set(&pinstance->expose_resources, 0);
4843
4844 INIT_LIST_HEAD(&pinstance->free_res_q);
4845 INIT_LIST_HEAD(&pinstance->used_res_q);
4846 INIT_LIST_HEAD(&pinstance->free_cmd_pool);
4847 INIT_LIST_HEAD(&pinstance->pending_cmd_pool);
4848
4849 spin_lock_init(&pinstance->free_pool_lock);
4850 spin_lock_init(&pinstance->pending_pool_lock);
4851 spin_lock_init(&pinstance->resource_lock);
4852 mutex_init(&pinstance->aen_queue_lock);
4853
4854 /* Work-queue (Shared) for deferred processing error handling */
4855 INIT_WORK(&pinstance->worker_q, pmcraid_worker_function);
4856
4857 /* Initialize the default log_level */
4858 pinstance->current_log_level = pmcraid_log_level;
4859
4860 /* Setup variables required for reset engine */
4861 pinstance->ioa_state = IOA_STATE_UNKNOWN;
4862 pinstance->reset_cmd = NULL;
4863 return 0;
4864}
4865
4866/**
4867 * pmcraid_release_buffers - release per-adapter buffers allocated
4868 *
4869 * @pinstance: pointer to adapter soft state
4870 *
4871 * Return Value
4872 * none
4873 */
4874static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4875{
4876 pmcraid_release_config_buffers(pinstance);
4877 pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
4878 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
4879 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4881}
4882
4883/**
4884 * pmcraid_shutdown - shutdown adapter controller.
4885 * @pdev: pci device struct
4886 *
4887 * Issues an adapter shutdown to the card and waits for its completion
4888 *
4889 * Return value
4890 * none
4891 */
4892static void pmcraid_shutdown(struct pci_dev *pdev)
4893{
4894 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
4895 pmcraid_reset_bringdown(pinstance);
4896}
4897
4898
4899/**
4900 * pmcraid_get_minor - returns unused minor number from minor number bitmap
4901 */
4902static unsigned short pmcraid_get_minor(void)
4903{
4904 int minor;
4905
4906	minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor) * 8);
4907 __set_bit(minor, pmcraid_minor);
4908 return minor;
4909}
4910
4911/**
4912 * pmcraid_release_minor - releases given minor back to minor number bitmap
4913 */
4914static void pmcraid_release_minor(unsigned short minor)
4915{
4916 __clear_bit(minor, pmcraid_minor);
4917}
4918
4919/**
4920 * pmcraid_setup_chrdev - allocates a minor number and registers a char device
4921 *
4922 * @pinstance: pointer to adapter instance for which to register device
4923 *
4924 * Return value
4925 * 0 in case of success, otherwise non-zero
4926 */
4927static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
4928{
4929 int minor;
4930 int error;
4931
4932 minor = pmcraid_get_minor();
4933 cdev_init(&pinstance->cdev, &pmcraid_fops);
4934 pinstance->cdev.owner = THIS_MODULE;
4935
4936 error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1);
4937
4938 if (error)
4939 pmcraid_release_minor(minor);
4940 else
4941 device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
4942 NULL, "pmcsas%u", minor);
4943 return error;
4944}
4945
4946/**
4947 * pmcraid_release_chrdev - unregisters per-adapter management interface
4948 *
4949 * @pinstance: pointer to adapter instance structure
4950 *
4951 * Return value
4952 * none
4953 */
4954static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
4955{
4956 pmcraid_release_minor(MINOR(pinstance->cdev.dev));
4957 device_destroy(pmcraid_class,
4958 MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
4959 cdev_del(&pinstance->cdev);
4960}
4961
4962/**
4963 * pmcraid_remove - IOA hot plug remove entry point
4964 * @pdev: pci device struct
4965 *
4966 * Return value
4967 * none
4968 */
4969static void __devexit pmcraid_remove(struct pci_dev *pdev)
4970{
4971 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
4972
4973 /* remove the management interface (/dev file) for this device */
4974 pmcraid_release_chrdev(pinstance);
4975
4976 /* remove host template from scsi midlayer */
4977 scsi_remove_host(pinstance->host);
4978
4979 /* block requests from mid-layer */
4980 scsi_block_requests(pinstance->host);
4981
4982 /* initiate shutdown adapter */
4983 pmcraid_shutdown(pdev);
4984
4985 pmcraid_disable_interrupts(pinstance, ~0);
4986 flush_scheduled_work();
4987
4988 pmcraid_kill_tasklets(pinstance);
4989 pmcraid_unregister_interrupt_handler(pinstance);
4990 pmcraid_release_buffers(pinstance);
4991 iounmap(pinstance->mapped_dma_addr);
4992 pci_release_regions(pdev);
4993 scsi_host_put(pinstance->host);
4994 pci_disable_device(pdev);
4995
4996 return;
4997}
4998
4999#ifdef CONFIG_PM
5000/**
5001 * pmcraid_suspend - driver suspend entry point for power management
5002 * @pdev: PCI device structure
5003 * @state: PCI power state to suspend routine
5004 *
5005 * Return Value - 0 always
5006 */
5007static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
5008{
5009 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5010
5011 pmcraid_shutdown(pdev);
5012 pmcraid_disable_interrupts(pinstance, ~0);
5013 pmcraid_kill_tasklets(pinstance);
5014 pci_set_drvdata(pinstance->pdev, pinstance);
5015 pmcraid_unregister_interrupt_handler(pinstance);
5016 pci_save_state(pdev);
5017 pci_disable_device(pdev);
5018 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5019
5020 return 0;
5021}
5022
5023/**
5024 * pmcraid_resume - driver resume entry point PCI power management
5025 * @pdev: PCI device structure
5026 *
5027 * Return Value - 0 in case of success. Error code in case of any failure
5028 */
5029static int pmcraid_resume(struct pci_dev *pdev)
5030{
5031 struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5032 struct Scsi_Host *host = pinstance->host;
5033 int rc;
5034 int hrrqs;
5035
5036 pci_set_power_state(pdev, PCI_D0);
5037 pci_enable_wake(pdev, PCI_D0, 0);
5038 pci_restore_state(pdev);
5039
5040 rc = pci_enable_device(pdev);
5041
5042 if (rc) {
5043 pmcraid_err("pmcraid: Enable device failed\n");
5044 return rc;
5045 }
5046
5047 pci_set_master(pdev);
5048
5049 if ((sizeof(dma_addr_t) == 4) ||
5050 pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
5051 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5052
5053 if (rc == 0)
5054 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5055
5056 if (rc != 0) {
5057 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5058 goto disable_device;
5059 }
5060
5061 atomic_set(&pinstance->outstanding_cmds, 0);
5062 hrrqs = pinstance->num_hrrq;
5063 rc = pmcraid_register_interrupt_handler(pinstance);
5064
5065 if (rc) {
5066 pmcraid_err("resume: couldn't register interrupt handlers\n");
5067 rc = -ENODEV;
5068 goto release_host;
5069 }
5070
5071 pmcraid_init_tasklets(pinstance);
5072 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
5073
5074 /* Start with the hard reset sequence, which brings the IOA up to the
5075 * operational state and completes the reset sequence.
5076 */
5077 pinstance->ioa_hard_reset = 1;
5078
5079 /* Start IOA firmware initialization and bring card to Operational
5080 * state.
5081 */
5082 if (pmcraid_reset_bringup(pinstance)) {
5083 pmcraid_err("couldn't initialize IOA \n");
5084 rc = -ENODEV;
5085 goto release_tasklets;
5086 }
5087
5088 return 0;
5089
5090release_tasklets:
5091 pmcraid_kill_tasklets(pinstance);
5092 pmcraid_unregister_interrupt_handler(pinstance);
5093
5094release_host:
5095 scsi_host_put(host);
5096
5097disable_device:
5098 pci_disable_device(pdev);
5099
5100 return rc;
5101}
5102
5103#else
5104
5105#define pmcraid_suspend NULL
5106#define pmcraid_resume NULL
5107
5108#endif /* CONFIG_PM */
5109
5110/**
5111 * pmcraid_complete_ioa_reset - Called by either timer or tasklet during
5112 * completion of the ioa reset
5113 * @cmd: pointer to reset command block
5114 */
5115static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
5116{
5117 struct pmcraid_instance *pinstance = cmd->drv_inst;
5118 unsigned long flags;
5119
5120 spin_lock_irqsave(pinstance->host->host_lock, flags);
5121 pmcraid_ioa_reset(cmd);
5122 spin_unlock_irqrestore(pinstance->host->host_lock, flags);
5123 scsi_unblock_requests(pinstance->host);
5124 schedule_work(&pinstance->worker_q);
5125}
5126
5127/**
5128 * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP
5129 *
5130 * @cmd: pointer to pmcraid_cmd structure
5131 *
5132 * Return Value
5133 * none
5134 */
5135static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
5136{
5137 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5138 void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset;
5139
5140 pmcraid_reinit_cmdblk(cmd);
5141
5142 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5143 ioarcb->request_type = REQ_TYPE_IOACMD;
5144 ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES;
5145 ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED;
5146
5147 /* If this was called as part of resource table reinitialization due to
5148 * lost CCN, it is enough to return the command block back to free pool
5149 * as part of set_supported_devs completion function.
5150 */
5151 if (cmd->drv_inst->reinit_cfg_table) {
5152 cmd->drv_inst->reinit_cfg_table = 0;
5153 cmd->release = 1;
5154 cmd_done = pmcraid_reinit_cfgtable_done;
5155 }
5156
5157 /* we will be done with the reset sequence after set supported devices,
5158 * setup the done function to return the command block back to free
5159 * pool
5160 */
5161 pmcraid_send_cmd(cmd,
5162 cmd_done,
5163 PMCRAID_SET_SUP_DEV_TIMEOUT,
5164 pmcraid_timeout_handler);
5165 return;
5166}
5167
5168/**
5169 * pmcraid_init_res_table - Initialize the resource table
5170 * @cmd: pointer to pmcraid command struct
5171 *
5172 * This function looks through the existing resource table, comparing
5173 * it with the config table. This function will take care of old/new
5174 * devices and schedule adding/removing them from the mid-layer
5175 * as appropriate.
5176 *
5177 * Return value
5178 * None
5179 */
5180static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5181{
5182 struct pmcraid_instance *pinstance = cmd->drv_inst;
5183 struct pmcraid_resource_entry *res, *temp;
5184 struct pmcraid_config_table_entry *cfgte;
5185 unsigned long lock_flags;
5186 int found, rc, i;
5187 LIST_HEAD(old_res);
5188
5189 if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
5190 dev_err(&pinstance->pdev->dev, "Microcode download required\n");
5191
5192 /* resource list is protected by pinstance->resource_lock.
5193 * init_res_table can be called from probe (user-thread) or runtime
5194 * reset (timer/tasklet)
5195 */
5196 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
5197
5198 list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
5199 list_move_tail(&res->queue, &old_res);
5200
5201 for (i = 0; i < pinstance->cfg_table->num_entries; i++) {
5202 cfgte = &pinstance->cfg_table->entries[i];
5203
5204 if (!pmcraid_expose_resource(cfgte))
5205 continue;
5206
5207 found = 0;
5208
5209 /* If this entry was already detected and initialized */
5210 list_for_each_entry_safe(res, temp, &old_res, queue) {
5211
5212 rc = memcmp(&res->cfg_entry.resource_address,
5213 &cfgte->resource_address,
5214 sizeof(cfgte->resource_address));
5215 if (!rc) {
5216 list_move_tail(&res->queue,
5217 &pinstance->used_res_q);
5218 found = 1;
5219 break;
5220 }
5221 }
5222
5223 /* If this is a new entry, initialize it and add it to the queue */
5224 if (!found) {
5225
5226 if (list_empty(&pinstance->free_res_q)) {
5227 dev_err(&pinstance->pdev->dev,
5228 "Too many devices attached\n");
5229 break;
5230 }
5231
5232 found = 1;
5233 res = list_entry(pinstance->free_res_q.next,
5234 struct pmcraid_resource_entry, queue);
5235
5236 res->scsi_dev = NULL;
5237 res->change_detected = RES_CHANGE_ADD;
5238 res->reset_progress = 0;
5239 list_move_tail(&res->queue, &pinstance->used_res_q);
5240 }
5241
5242 /* copy new configuration table entry details into driver
5243 * maintained resource entry
5244 */
5245 if (found) {
5246 memcpy(&res->cfg_entry, cfgte,
5247 sizeof(struct pmcraid_config_table_entry));
5248 pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
5249 res->cfg_entry.resource_type,
5250 res->cfg_entry.unique_flags1,
5251 le32_to_cpu(res->cfg_entry.resource_address));
5252 }
5253 }
5254
5255 /* Detect any deleted entries, mark them for deletion from mid-layer */
5256 list_for_each_entry_safe(res, temp, &old_res, queue) {
5257
5258 if (res->scsi_dev) {
5259 res->change_detected = RES_CHANGE_DEL;
5260 res->cfg_entry.resource_handle =
5261 PMCRAID_INVALID_RES_HANDLE;
5262 list_move_tail(&res->queue, &pinstance->used_res_q);
5263 } else {
5264 list_move_tail(&res->queue, &pinstance->free_res_q);
5265 }
5266 }
5267
5268 /* release the resource list lock */
5269 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
5270 pmcraid_set_supported_devs(cmd);
5271}
5272
5273/**
5274 * pmcraid_querycfg - Send a Query IOA Config to the adapter.
5275 * @cmd: pointer to pmcraid_cmd struct
5276 *
5277 * This function sends a Query IOA Configuration command to the adapter to
5278 * retrieve the IOA configuration table.
5279 *
5280 * Return value:
5281 * none
5282 */
5283static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
5284{
5285 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5286 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
5287 struct pmcraid_instance *pinstance = cmd->drv_inst;
5288 __be32 cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
5289
5290 ioarcb->request_type = REQ_TYPE_IOACMD;
5291 ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5292
5293 ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG;
5294
5295 /* firmware requires 4-byte length field, specified in big-endian format */
5296 memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size));
5297
5298 /* Since entire config table can be described by single IOADL, it can
5299 * be part of IOARCB itself
5300 */
5301 ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
5302 offsetof(struct pmcraid_ioarcb,
5303 add_data.u.ioadl[0]));
5304 ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
5305 ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
5306
5307 ioarcb->request_flags0 |= NO_LINK_DESCS;
5308 ioarcb->data_transfer_length =
5309 cpu_to_le32(sizeof(struct pmcraid_config_table));
5310
5311 ioadl = &(ioarcb->add_data.u.ioadl[0]);
5312 ioadl->flags = IOADL_FLAGS_LAST_DESC; /* flags is a single byte */
5313 ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
5314 ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
5315
5316 pmcraid_send_cmd(cmd, pmcraid_init_res_table,
5317 PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5318}
5319
5320
5321/**
5322 * pmcraid_probe - PCI probe entry point for PMC MaxRaid controller driver
5323 * @pdev: pointer to pci device structure
5324 * @dev_id: pointer to device ids structure
5325 *
5326 * Return Value
5327 * returns 0 if the device is claimed and successfully configured.
5328 * returns non-zero error code in case of any failure
5329 */
5330static int __devinit pmcraid_probe(
5331 struct pci_dev *pdev,
5332 const struct pci_device_id *dev_id
5333)
5334{
5335 struct pmcraid_instance *pinstance;
5336 struct Scsi_Host *host;
5337 void __iomem *mapped_pci_addr;
5338 int rc = PCIBIOS_SUCCESSFUL;
5339
5340 if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) {
5341 pmcraid_err
5342 ("maximum number(%d) of supported adapters reached\n",
5343 atomic_read(&pmcraid_adapter_count));
5344 return -ENOMEM;
5345 }
5346
5347 atomic_inc(&pmcraid_adapter_count);
5348 rc = pci_enable_device(pdev);
5349
5350 if (rc) {
5351 dev_err(&pdev->dev, "Cannot enable adapter\n");
5352 atomic_dec(&pmcraid_adapter_count);
5353 return rc;
5354 }
5355
5356 dev_info(&pdev->dev,
5357 "Found new IOA(%x:%x), Total IOA count: %d\n",
5358 pdev->vendor, pdev->device,
5359 atomic_read(&pmcraid_adapter_count));
5360
5361 rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME);
5362
5363 if (rc < 0) {
5364 dev_err(&pdev->dev,
5365 "Couldn't register memory range of registers\n");
5366 goto out_disable_device;
5367 }
5368
5369 mapped_pci_addr = pci_iomap(pdev, 0, 0);
5370
5371 if (!mapped_pci_addr) {
5372 dev_err(&pdev->dev, "Couldn't map PCI registers memory\n");
5373 rc = -ENOMEM;
5374 goto out_release_regions;
5375 }
5376
5377 pci_set_master(pdev);
5378
5379 /* Firmware requires the system bus address of the IOARCB to be within
5380 * the 32-bit addressable range even though it has a 64-bit IOARRIN
5381 * register. Firmware does support 64-bit streaming DMA buffers, but
5382 * coherent buffers must be 32-bit. Since pci_alloc_consistent always
5383 * returns memory within 4GB (if not, change this logic), coherent
5384 * buffers are within firmware acceptable address ranges.
5385 */
5386 if ((sizeof(dma_addr_t) == 4) ||
5387 pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
5388 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5389
5390 /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
5391 * bit mask for pci_alloc_consistent to return addresses within 4GB
5392 */
5393 if (rc == 0)
5394 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5395
5396 if (rc != 0) {
5397 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5398 goto cleanup_nomem;
5399 }
5400
5401 host = scsi_host_alloc(&pmcraid_host_template,
5402 sizeof(struct pmcraid_instance));
5403
5404 if (!host) {
5405 dev_err(&pdev->dev, "scsi_host_alloc failed!\n");
5406 rc = -ENOMEM;
5407 goto cleanup_nomem;
5408 }
5409
5410 host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS;
5411 host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET;
5412 host->unique_id = host->host_no;
5413 host->max_channel = PMCRAID_MAX_BUS_TO_SCAN;
5414 host->max_cmd_len = PMCRAID_MAX_CDB_LEN;
5415
5416 /* zero out entire instance structure */
5417 pinstance = (struct pmcraid_instance *)host->hostdata;
5418 memset(pinstance, 0, sizeof(*pinstance));
5419
5420 pinstance->chip_cfg =
5421 (struct pmcraid_chip_details *)(dev_id->driver_data);
5422
5423 rc = pmcraid_init_instance(pdev, host, mapped_pci_addr);
5424
5425 if (rc < 0) {
5426 dev_err(&pdev->dev, "failed to initialize adapter instance\n");
5427 goto out_scsi_host_put;
5428 }
5429
5430 pci_set_drvdata(pdev, pinstance);
5431
5432 /* Save PCI config-space for use following the reset */
5433 rc = pci_save_state(pinstance->pdev);
5434
5435 if (rc != 0) {
5436 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5437 goto out_scsi_host_put;
5438 }
5439
5440 pmcraid_disable_interrupts(pinstance, ~0);
5441
5442 rc = pmcraid_register_interrupt_handler(pinstance);
5443
5444 if (rc) {
5445 pmcraid_err("couldn't register interrupt handler\n");
5446 goto out_scsi_host_put;
5447 }
5448
5449 pmcraid_init_tasklets(pinstance);
5450
5451 /* allocate various buffers used by LLD */
5452 rc = pmcraid_init_buffers(pinstance);
5453
5454 if (rc) {
5455 pmcraid_err("couldn't allocate memory blocks\n");
5456 goto out_unregister_isr;
5457 }
5458
5459 /* check the reset type required */
5460 pmcraid_reset_type(pinstance);
5461
5462 pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
5463
5464 /* Start IOA firmware initialization and bring card to Operational
5465 * state.
5466 */
5467 pmcraid_info("starting IOA initialization sequence\n");
5468 if (pmcraid_reset_bringup(pinstance)) {
5469 pmcraid_err("couldn't initialize IOA \n");
5470 rc = 1;
5471 goto out_release_bufs;
5472 }
5473
5474 /* Add adapter instance into mid-layer list */
5475 rc = scsi_add_host(pinstance->host, &pdev->dev);
5476 if (rc != 0) {
5477 pmcraid_err("couldn't add host into mid-layer: %d\n", rc);
5478 goto out_release_bufs;
5479 }
5480
5481 scsi_scan_host(pinstance->host);
5482
5483 rc = pmcraid_setup_chrdev(pinstance);
5484
5485 if (rc != 0) {
5486 pmcraid_err("couldn't create mgmt interface, error: %x\n",
5487 rc);
5488 goto out_remove_host;
5489 }
5490
5491 /* Schedule worker thread to handle CCN and take care of adding and
5492 * removing devices to/from the OS
5493 */
5494 atomic_set(&pinstance->expose_resources, 1);
5495 schedule_work(&pinstance->worker_q);
5496 return rc;
5497
5498out_remove_host:
5499 scsi_remove_host(host);
5500
5501out_release_bufs:
5502 pmcraid_release_buffers(pinstance);
5503
5504out_unregister_isr:
5505 pmcraid_kill_tasklets(pinstance);
5506 pmcraid_unregister_interrupt_handler(pinstance);
5507
5508out_scsi_host_put:
5509 scsi_host_put(host);
5510
5511cleanup_nomem:
5512 iounmap(mapped_pci_addr);
5513
5514out_release_regions:
5515 pci_release_regions(pdev);
5516
5517out_disable_device:
5518 atomic_dec(&pmcraid_adapter_count);
5519 pci_set_drvdata(pdev, NULL);
5520 pci_disable_device(pdev);
5521 return -ENODEV;
5522}
5523
5524/*
5525 * PCI driver structure of pmcraid driver
5526 */
5527static struct pci_driver pmcraid_driver = {
5528 .name = PMCRAID_DRIVER_NAME,
5529 .id_table = pmcraid_pci_table,
5530 .probe = pmcraid_probe,
5531 .remove = pmcraid_remove,
5532 .suspend = pmcraid_suspend,
5533 .resume = pmcraid_resume,
5534 .shutdown = pmcraid_shutdown
5535};
5536
5537
5538/**
5539 * pmcraid_init - module load entry point
5540 */
5541static int __init pmcraid_init(void)
5542{
5543 dev_t dev;
5544 int error;
5545
5546 pmcraid_info("%s Device Driver version: %s %s\n",
5547 PMCRAID_DRIVER_NAME,
5548 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
5549
5550 error = alloc_chrdev_region(&dev, 0,
5551 PMCRAID_MAX_ADAPTERS,
5552 PMCRAID_DEVFILE);
5553
5554 if (error) {
5555 pmcraid_err("failed to get a major number for adapters\n");
5556 goto out_init;
5557 }
5558
5559 pmcraid_major = MAJOR(dev);
5560 pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
5561
5562 if (IS_ERR(pmcraid_class)) {
5563 error = PTR_ERR(pmcraid_class);
5564 pmcraid_err("failed to register with with sysfs, error = %x\n",
5565 error);
5566 goto out_unreg_chrdev;
5567 }
5568
5569 error = pmcraid_netlink_init();
5570 if (error) {
5571 class_destroy(pmcraid_class);
5572 goto out_unreg_chrdev;
5573 }
5574
5575 error = pci_register_driver(&pmcraid_driver);
5576
5577 if (error == 0)
5578 goto out_init;
5579
5580 pmcraid_err("failed to register pmcraid driver, error = %x\n",
5581 error);
5582 class_destroy(pmcraid_class);
5583 pmcraid_netlink_release();
5584
5585out_unreg_chrdev:
5586 unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
5587out_init:
5588 return error;
5589}
5590
5591/**
5592 * pmcraid_exit - module unload entry point
5593 */
5594static void __exit pmcraid_exit(void)
5595{
5596 pci_unregister_driver(&pmcraid_driver);
5597 pmcraid_netlink_release();
5598 class_destroy(pmcraid_class);
5599 unregister_chrdev_region(MKDEV(pmcraid_major, 0),
5600 PMCRAID_MAX_ADAPTERS);
5601}
5602
5603module_init(pmcraid_init);
5604module_exit(pmcraid_exit);
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
new file mode 100644
index 000000000000..614b3a764fed
--- /dev/null
+++ b/drivers/scsi/pmcraid.h
@@ -0,0 +1,1029 @@
1/*
2 * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
3 *
4 * Copyright (C) 2008, 2009 PMC Sierra Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef _PMCRAID_H
22#define _PMCRAID_H
23
24#include <linux/version.h>
25#include <linux/types.h>
26#include <linux/completion.h>
27#include <linux/list.h>
28#include <scsi/scsi.h>
29#include <linux/kref.h>
30#include <scsi/scsi_cmnd.h>
31#include <linux/cdev.h>
32#include <net/netlink.h>
33#include <net/genetlink.h>
34#include <linux/connector.h>
35/*
36 * Driver name : string representing the driver name
37 * Device file : /dev file to be used for management interfaces
38 * Driver version: version string in major_version.minor_version.patch format
39 * Driver date : date information in "Mon dd yyyy" format
40 */
41#define PMCRAID_DRIVER_NAME "PMC MaxRAID"
42#define PMCRAID_DEVFILE "pmcsas"
43#define PMCRAID_DRIVER_VERSION "1.0.2"
44#define PMCRAID_DRIVER_DATE __DATE__
45
46/* Maximum number of adapters supported by current version of the driver */
47#define PMCRAID_MAX_ADAPTERS 1024
48
49/* Bit definitions as per firmware, bit position [0][1][2].....[31] */
50#define PMC_BIT8(n) (1 << (7 - (n)))
51#define PMC_BIT16(n) (1 << (15 - (n)))
52#define PMC_BIT32(n) (1 << (31 - (n)))
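/*
 * Illustrative note: PMC_BITn() numbers bits from the most-significant end,
 * matching the firmware convention [0][1]...[31] above, so for example
 *
 *	PMC_BIT8(0)  == 0x80		PMC_BIT8(7)  == 0x01
 *	PMC_BIT16(0) == 0x8000		PMC_BIT32(0) == 0x80000000
 *
 * i.e. PMC_BIT32(0) is the bit the firmware documentation calls "bit 0".
 */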
53
54/* PMC PCI vendor ID and device ID values */
55#define PCI_VENDOR_ID_PMC 0x11F8
56#define PCI_DEVICE_ID_PMC_MAXRAID 0x5220
57
58/*
59 * MAX_CMD : maximum commands that can be outstanding with IOA
60 * MAX_IO_CMD : command blocks available for IO commands
61 * MAX_HCAM_CMD : command blocks available for HCAMs
62 * MAX_INTERNAL_CMD : command blocks available for internal commands like reset
63 */
64#define PMCRAID_MAX_CMD 1024
65#define PMCRAID_MAX_IO_CMD 1020
66#define PMCRAID_MAX_HCAM_CMD 2
67#define PMCRAID_MAX_INTERNAL_CMD 2
68
69/* MAX_IOADLS : max number of scatter-gather lists supported by IOA
70 * IOADLS_INTERNAL : number of ioadls included as part of IOARCB.
71 * IOADLS_EXTERNAL : number of ioadls allocated external to IOARCB
72 */
73#define PMCRAID_IOADLS_INTERNAL 27
74#define PMCRAID_IOADLS_EXTERNAL 37
75#define PMCRAID_MAX_IOADLS PMCRAID_IOADLS_INTERNAL
76
77/* HRRQ_ENTRY_SIZE : size of hrrq buffer
78 * IOARCB_ALIGNMENT : alignment required for IOARCB
79 * IOADL_ALIGNMENT : alignment requirement for IOADLs
80 * MSIX_VECTORS : number of MSIX vectors supported
81 */
82#define HRRQ_ENTRY_SIZE sizeof(__le32)
83#define PMCRAID_IOARCB_ALIGNMENT 32
84#define PMCRAID_IOADL_ALIGNMENT 16
85#define PMCRAID_IOASA_ALIGNMENT 4
86#define PMCRAID_NUM_MSIX_VECTORS 1
87
88/* various other limits */
89#define PMCRAID_VENDOR_ID_LEN 8
90#define PMCRAID_PRODUCT_ID_LEN 16
91#define PMCRAID_SERIAL_NUM_LEN 8
92#define PMCRAID_LUN_LEN 8
93#define PMCRAID_MAX_CDB_LEN 16
94#define PMCRAID_DEVICE_ID_LEN 8
95#define PMCRAID_SENSE_DATA_LEN 256
96#define PMCRAID_ADD_CMD_PARAM_LEN 48
97
98#define PMCRAID_MAX_BUS_TO_SCAN 1
99#define PMCRAID_MAX_NUM_TARGETS_PER_BUS 256
100#define PMCRAID_MAX_NUM_LUNS_PER_TARGET 8
101
102/* IOA bus/target/lun number of IOA resources */
103#define PMCRAID_IOA_BUS_ID 0xfe
104#define PMCRAID_IOA_TARGET_ID 0xff
105#define PMCRAID_IOA_LUN_ID 0xff
106#define PMCRAID_VSET_BUS_ID 0x1
107#define PMCRAID_VSET_LUN_ID 0x0
108#define PMCRAID_PHYS_BUS_ID 0x0
109#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8
110#define PMCRAID_MAX_VSET_TARGETS 240
111#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8
112
113#define PMCRAID_IOA_MAX_SECTORS 32767
114#define PMCRAID_VSET_MAX_SECTORS 512
115#define PMCRAID_MAX_CMD_PER_LUN 254
116
117/* Number of configuration table entries (resources) */
118#define PMCRAID_MAX_NUM_OF_VSETS 240
119
120/* Todo : Check max limit for Phase 1 */
121#define PMCRAID_MAX_NUM_OF_PHY_DEVS 256
122
123/* MAX_NUM_OF_DEVS includes 1 FP, 1 Dummy Enclosure device */
124#define PMCRAID_MAX_NUM_OF_DEVS \
125 (PMCRAID_MAX_NUM_OF_VSETS + PMCRAID_MAX_NUM_OF_PHY_DEVS + 2)
126
127#define PMCRAID_MAX_RESOURCES PMCRAID_MAX_NUM_OF_DEVS
128
129/* Adapter Commands used by driver */
130#define PMCRAID_QUERY_RESOURCE_STATE 0xC2
131#define PMCRAID_RESET_DEVICE 0xC3
132/* options to select reset target */
133#define ENABLE_RESET_MODIFIER 0x80
134#define RESET_DEVICE_LUN 0x40
135#define RESET_DEVICE_TARGET 0x20
136#define RESET_DEVICE_BUS 0x10
137
138#define PMCRAID_IDENTIFY_HRRQ 0xC4
139#define PMCRAID_QUERY_IOA_CONFIG 0xC5
140#define PMCRAID_QUERY_CMD_STATUS 0xCB
141#define PMCRAID_ABORT_CMD 0xC7
142
143/* CANCEL ALL command, provides option for setting SYNC_COMPLETE
144 * on the target resources for which commands got cancelled
145 */
146#define PMCRAID_CANCEL_ALL_REQUESTS 0xCE
147#define PMCRAID_SYNC_COMPLETE_AFTER_CANCEL PMC_BIT8(0)
148
149/* HCAM command and types of HCAM supported by IOA */
150#define PMCRAID_HOST_CONTROLLED_ASYNC 0xCF
151#define PMCRAID_HCAM_CODE_CONFIG_CHANGE 0x01
152#define PMCRAID_HCAM_CODE_LOG_DATA 0x02
153
154/* IOA shutdown command and various shutdown types */
155#define PMCRAID_IOA_SHUTDOWN 0xF7
156#define PMCRAID_SHUTDOWN_NORMAL 0x00
157#define PMCRAID_SHUTDOWN_PREPARE_FOR_NORMAL 0x40
158#define PMCRAID_SHUTDOWN_NONE 0x100
159#define PMCRAID_SHUTDOWN_ABBREV 0x80
160
161/* SET SUPPORTED DEVICES command and the option to select all the
162 * devices to be supported
163 */
164#define PMCRAID_SET_SUPPORTED_DEVICES 0xFB
165#define ALL_DEVICES_SUPPORTED PMC_BIT8(0)
166
167/* This option is used with SCSI WRITE_BUFFER command */
168#define PMCRAID_WR_BUF_DOWNLOAD_AND_SAVE 0x05
169
170/* IOASC Codes used by driver */
171#define PMCRAID_IOASC_SENSE_MASK 0xFFFFFF00
172#define PMCRAID_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24)
173#define PMCRAID_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16)
174#define PMCRAID_IOASC_SENSE_QUAL(ioasc) (((ioasc) & 0x0000ff00) >> 8)
175#define PMCRAID_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff)
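/*
 * Worked example (illustrative): PMCRAID_IOASC_UA_BUS_WAS_RESET below is
 * 0x06290000, which the macros above decode as
 *
 *	PMCRAID_IOASC_SENSE_KEY(0x06290000)  == 0x06	(UNIT ATTENTION)
 *	PMCRAID_IOASC_SENSE_CODE(0x06290000) == 0x29	(ASC)
 *	PMCRAID_IOASC_SENSE_QUAL(0x06290000) == 0x00	(ASCQ)
 *
 * so the top three bytes of an IOASC map directly onto the SCSI sense
 * key/ASC/ASCQ triple, which is why the sense mask keeps 0xFFFFFF00.
 */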
176
177#define PMCRAID_IOASC_GOOD_COMPLETION 0x00000000
178#define PMCRAID_IOASC_NR_INIT_CMD_REQUIRED 0x02040200
179#define PMCRAID_IOASC_NR_IOA_RESET_REQUIRED 0x02048000
180#define PMCRAID_IOASC_NR_SYNC_REQUIRED 0x023F0000
181#define PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC 0x03110C00
182#define PMCRAID_IOASC_HW_CANNOT_COMMUNICATE 0x04050000
183#define PMCRAID_IOASC_HW_DEVICE_TIMEOUT 0x04080100
184#define PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR 0x04448500
185#define PMCRAID_IOASC_HW_IOA_RESET_REQUIRED 0x04448600
186#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 0x05250000
187#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000
188#define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000
189#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000
190
191/* Driver defined IOASCs */
192#define PMCRAID_IOASC_IOA_WAS_RESET 0x10000001
193#define PMCRAID_IOASC_PCI_ACCESS_ERROR 0x10000002
194
195/* Various timeout values used: first group in milliseconds, second in
196 * jiffies. If any of these are chip specific, move them to the
197 * pmcraid_chip_details structure. */
198#define PMCRAID_PCI_DEASSERT_TIMEOUT 2000
199#define PMCRAID_BIST_TIMEOUT 2000
200#define PMCRAID_AENWAIT_TIMEOUT 5000
201#define PMCRAID_TRANSOP_TIMEOUT 60000
202
203#define PMCRAID_RESET_TIMEOUT (2 * HZ)
204#define PMCRAID_CHECK_FOR_RESET_TIMEOUT ((HZ / 10))
205#define PMCRAID_VSET_IO_TIMEOUT (60 * HZ)
206#define PMCRAID_INTERNAL_TIMEOUT (60 * HZ)
207#define PMCRAID_SHUTDOWN_TIMEOUT (150 * HZ)
208#define PMCRAID_RESET_BUS_TIMEOUT (60 * HZ)
209#define PMCRAID_RESET_HOST_TIMEOUT (150 * HZ)
210#define PMCRAID_REQUEST_SENSE_TIMEOUT (30 * HZ)
211#define PMCRAID_SET_SUP_DEV_TIMEOUT (2 * 60 * HZ)
212
213/* structure to represent a scatter-gather element (IOADL descriptor) */
214struct pmcraid_ioadl_desc {
215 __le64 address;
216 __le32 data_len;
217 __u8 reserved[3];
218 __u8 flags;
219} __attribute__((packed, aligned(PMCRAID_IOADL_ALIGNMENT)));
220
221/* pmcraid_ioadl_desc.flags values */
222#define IOADL_FLAGS_CHAINED PMC_BIT8(0)
223#define IOADL_FLAGS_LAST_DESC PMC_BIT8(1)
224#define IOADL_FLAGS_READ_LAST PMC_BIT8(1)
225#define IOADL_FLAGS_WRITE_LAST PMC_BIT8(1)
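/*
 * Illustrative sketch: describing one flat DMA buffer with a single IOADL
 * descriptor, the way pmcraid_querycfg() does for the configuration table.
 * The final (here, the only) descriptor must carry IOADL_FLAGS_LAST_DESC;
 * note that .flags is a single byte, so no endian conversion applies.
 */
static inline void pmcraid_sketch_fill_ioadl(struct pmcraid_ioadl_desc *ioadl,
					     dma_addr_t buf_dma, u32 buf_len)
{
	ioadl->address = cpu_to_le64(buf_dma);
	ioadl->data_len = cpu_to_le32(buf_len);
	ioadl->flags = IOADL_FLAGS_LAST_DESC;
}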
226
227
228/* additional IOARCB data which can be CDB or additional request parameters
229 * or list of IOADLs. Firmware supports max of 512 bytes for IOARCB, hence
230 * the number of IOADLs is limited to 27. If more are needed, they are
231 * used in chained form
232 */
233struct pmcraid_ioarcb_add_data {
234 union {
235 struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_INTERNAL];
236 __u8 add_cmd_params[PMCRAID_ADD_CMD_PARAM_LEN];
237 } u;
238};
239
240/*
241 * IOA Request Control Block
242 */
243struct pmcraid_ioarcb {
244 __le64 ioarcb_bus_addr;
245 __le32 resource_handle;
246 __le32 response_handle;
247 __le64 ioadl_bus_addr;
248 __le32 ioadl_length;
249 __le32 data_transfer_length;
250 __le64 ioasa_bus_addr;
251 __le16 ioasa_len;
252 __le16 cmd_timeout;
253 __le16 add_cmd_param_offset;
254 __le16 add_cmd_param_length;
255 __le32 reserved1[2];
256 __le32 reserved2;
257 __u8 request_type;
258 __u8 request_flags0;
259 __u8 request_flags1;
260 __u8 hrrq_id;
261 __u8 cdb[PMCRAID_MAX_CDB_LEN];
262 struct pmcraid_ioarcb_add_data add_data;
263} __attribute__((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
264
265/* well known resource handle values */
266#define PMCRAID_IOA_RES_HANDLE 0xffffffff
267#define PMCRAID_INVALID_RES_HANDLE 0
268
269/* pmcraid_ioarcb.request_type values */
270#define REQ_TYPE_SCSI 0x00
271#define REQ_TYPE_IOACMD 0x01
272#define REQ_TYPE_HCAM 0x02
273
274/* pmcraid_ioarcb.flags0 values */
275#define TRANSFER_DIR_WRITE PMC_BIT8(0)
276#define INHIBIT_UL_CHECK PMC_BIT8(2)
277#define SYNC_OVERRIDE PMC_BIT8(3)
278#define SYNC_COMPLETE PMC_BIT8(4)
279#define NO_LINK_DESCS PMC_BIT8(5)
280
281/* pmcraid_ioarcb.flags1 values */
282#define DELAY_AFTER_RESET PMC_BIT8(0)
283#define TASK_TAG_SIMPLE 0x10
284#define TASK_TAG_ORDERED 0x20
285#define TASK_TAG_QUEUE_HEAD 0x30
286
287/* toggle bit offset in response handle */
288#define HRRQ_TOGGLE_BIT 0x01
289#define HRRQ_RESPONSE_BIT 0x02
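/*
 * Illustrative sketch, not driver code: consuming a toggle-bit response ring
 * such as the HRRQ. Each 32-bit entry carries a payload in its upper bits
 * and the flag bits above in bits 0-1; the consumer flips the toggle value
 * it expects on every wrap, so entries left over from the previous lap never
 * match. The payload layout (index << 2) is an assumption for illustration.
 */
struct pmcraid_sketch_ring {
	u32 *start, *end, *curr;	/* end points at the last entry */
	u32 expected_toggle;		/* 0 or 1, compared against bit 0 */
};

static inline void pmcraid_sketch_drain_ring(struct pmcraid_sketch_ring *r,
					     void (*handle)(u32 payload))
{
	while ((*r->curr & HRRQ_TOGGLE_BIT) == r->expected_toggle) {
		handle(*r->curr >> 2);		/* assumed payload layout */

		if (r->curr < r->end) {
			r->curr++;
		} else {			/* wrap: flip expected value */
			r->curr = r->start;
			r->expected_toggle ^= 1;
		}
	}
}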
290
291/* IOA Status Area */
292struct pmcraid_ioasa_vset {
293 __le32 failing_lba_hi;
294 __le32 failing_lba_lo;
295 __le32 reserved;
296} __attribute__((packed, aligned(4)));
297
298struct pmcraid_ioasa {
299 __le32 ioasc;
300 __le16 returned_status_length;
301 __le16 available_status_length;
302 __le32 residual_data_length;
303 __le32 ilid;
304 __le32 fd_ioasc;
305 __le32 fd_res_address;
306 __le32 fd_res_handle;
307 __le32 reserved;
308
309 /* resource specific sense information */
310 union {
311 struct pmcraid_ioasa_vset vset;
312 } u;
313
314 /* IOA autosense data */
315 __le16 auto_sense_length;
316 __le16 error_data_length;
317 __u8 sense_data[PMCRAID_SENSE_DATA_LEN];
318} __attribute__((packed, aligned(4)));
319
320#define PMCRAID_DRIVER_ILID 0xffffffff
321
322/* Config Table Entry per Resource */
323struct pmcraid_config_table_entry {
324 __u8 resource_type;
325 __u8 bus_protocol;
326 __le16 array_id;
327 __u8 common_flags0;
328 __u8 common_flags1;
329 __u8 unique_flags0;
330 __u8 unique_flags1; /*also used as vset target_id */
331 __le32 resource_handle;
332 __le32 resource_address;
333 __u8 device_id[PMCRAID_DEVICE_ID_LEN];
334 __u8 lun[PMCRAID_LUN_LEN];
335} __attribute__((packed, aligned(4)));
336
337/* resource types (config_table_entry.resource_type values) */
338#define RES_TYPE_AF_DASD 0x00
339#define RES_TYPE_GSCSI 0x01
340#define RES_TYPE_VSET 0x02
341#define RES_TYPE_IOA_FP 0xFF
342
343#define RES_IS_IOA(res) ((res).resource_type == RES_TYPE_IOA_FP)
344#define RES_IS_GSCSI(res) ((res).resource_type == RES_TYPE_GSCSI)
345#define RES_IS_VSET(res) ((res).resource_type == RES_TYPE_VSET)
346#define RES_IS_AFDASD(res) ((res).resource_type == RES_TYPE_AF_DASD)
347
348/* bus_protocol values used by driver */
349#define RES_TYPE_VENCLOSURE 0x8
350
351/* config_table_entry.common_flags0 */
352#define MULTIPATH_RESOURCE PMC_BIT32(0)
353
354/* unique_flags1 */
355#define IMPORT_MODE_MANUAL PMC_BIT8(0)
356
357/* well known resource handle values */
358#define RES_HANDLE_IOA 0xFFFFFFFF
359#define RES_HANDLE_NONE 0x00000000
360
361/* well known resource address values */
362#define RES_ADDRESS_IOAFP 0xFEFFFFFF
363#define RES_ADDRESS_INVALID 0xFFFFFFFF
364
365/* BUS/TARGET/LUN values from resource_address */
366#define RES_BUS(res_addr) (le32_to_cpu(res_addr) & 0xFF)
367#define RES_TARGET(res_addr) ((le32_to_cpu(res_addr) >> 16) & 0xFF)
368#define RES_LUN(res_addr) 0x0
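/*
 * Worked example (illustrative): a resource_address field that converts to
 * 0x00FF00FE in CPU order decodes with the macros above as
 *
 *	RES_BUS(addr)    == 0xFE	(PMCRAID_IOA_BUS_ID)
 *	RES_TARGET(addr) == 0xFF	(PMCRAID_IOA_TARGET_ID)
 *	RES_LUN(addr)    == 0x0		(the LUN is always reported as 0)
 *
 * i.e. the bus id lives in the low byte and the target id in bits 16-23.
 */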
369
370/* configuration table structure */
371struct pmcraid_config_table {
372 __le16 num_entries;
373 __u8 table_format;
374 __u8 reserved1;
375 __u8 flags;
376 __u8 reserved2[11];
377 struct pmcraid_config_table_entry entries[PMCRAID_MAX_RESOURCES];
378} __attribute__((packed, aligned(4)));
379
380/* config_table.flags value */
381#define MICROCODE_UPDATE_REQUIRED PMC_BIT32(0)
382
383/*
384 * HCAM format
385 */
386#define PMCRAID_HOSTRCB_LDNSIZE 4056
387
388/* Error log notification format */
389struct pmcraid_hostrcb_error {
390 __le32 fd_ioasc;
391 __le32 fd_ra;
392 __le32 fd_rh;
393 __le32 prc;
394 union {
395 __u8 data[PMCRAID_HOSTRCB_LDNSIZE];
396 } u;
397} __attribute__ ((packed, aligned(4)));
398
399struct pmcraid_hcam_hdr {
400 __u8 op_code;
401 __u8 notification_type;
402 __u8 notification_lost;
403 __u8 flags;
404 __u8 overlay_id;
405 __u8 reserved1[3];
406 __le32 ilid;
407 __le32 timestamp1;
408 __le32 timestamp2;
409 __le32 data_len;
410} __attribute__((packed, aligned(4)));
411
412#define PMCRAID_AEN_GROUP 0x3
413
414struct pmcraid_hcam_ccn {
415 struct pmcraid_hcam_hdr header;
416 struct pmcraid_config_table_entry cfg_entry;
417} __attribute__((packed, aligned(4)));
418
419struct pmcraid_hcam_ldn {
420 struct pmcraid_hcam_hdr header;
421 struct pmcraid_hostrcb_error error_log;
422} __attribute__((packed, aligned(4)));
423
424/* pmcraid_hcam.op_code values */
425#define HOSTRCB_TYPE_CCN 0xE1
426#define HOSTRCB_TYPE_LDN 0xE2
427
428/* pmcraid_hcam.notification_type values */
429#define NOTIFICATION_TYPE_ENTRY_CHANGED 0x0
430#define NOTIFICATION_TYPE_ENTRY_NEW 0x1
431#define NOTIFICATION_TYPE_ENTRY_DELETED 0x2
432#define NOTIFICATION_TYPE_ERROR_LOG 0x10
433#define NOTIFICATION_TYPE_INFORMATION_LOG 0x11
434
435#define HOSTRCB_NOTIFICATIONS_LOST PMC_BIT8(0)
436
437/* pmcraid_hcam.flags values */
438#define HOSTRCB_INTERNAL_OP_ERROR PMC_BIT8(0)
439#define HOSTRCB_ERROR_RESPONSE_SENT PMC_BIT8(1)
440
441/* pmcraid_hcam.overlay_id values */
442#define HOSTRCB_OVERLAY_ID_08 0x08
443#define HOSTRCB_OVERLAY_ID_09 0x09
444#define HOSTRCB_OVERLAY_ID_11 0x11
445#define HOSTRCB_OVERLAY_ID_12 0x12
446#define HOSTRCB_OVERLAY_ID_13 0x13
447#define HOSTRCB_OVERLAY_ID_14 0x14
448#define HOSTRCB_OVERLAY_ID_16 0x16
449#define HOSTRCB_OVERLAY_ID_17 0x17
450#define HOSTRCB_OVERLAY_ID_20 0x20
451#define HOSTRCB_OVERLAY_ID_FF 0xFF
452
453/* Implementation specific card details */
454struct pmcraid_chip_details {
455 /* hardware register offsets */
456 unsigned long ioastatus;
457 unsigned long ioarrin;
458 unsigned long mailbox;
459 unsigned long global_intr_mask;
460 unsigned long ioa_host_intr;
461 unsigned long ioa_host_intr_clr;
462 unsigned long ioa_host_mask;
463 unsigned long ioa_host_mask_clr;
464 unsigned long host_ioa_intr;
465 unsigned long host_ioa_intr_clr;
466
467 /* timeout used during transitional to operational state */
468 unsigned long transop_timeout;
469};
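/*
 * Illustrative note: these per-chip offsets are applied to the iomapped BAR
 * to derive the register pointers held in pmcraid_instance, along the lines
 * of (a sketch of the init-time mapping, not verbatim driver code):
 *
 *	pinstance->ioa_status = pinstance->mapped_dma_addr + chip_cfg->ioastatus;
 *	pinstance->ioarrin = pinstance->mapped_dma_addr + chip_cfg->ioarrin;
 *	pinstance->mailbox = pinstance->mapped_dma_addr + chip_cfg->mailbox;
 */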
470
471/* IOA to HOST doorbells (interrupts) */
472#define INTRS_TRANSITION_TO_OPERATIONAL PMC_BIT32(0)
473#define INTRS_IOARCB_TRANSFER_FAILED PMC_BIT32(3)
474#define INTRS_IOA_UNIT_CHECK PMC_BIT32(4)
475#define INTRS_NO_HRRQ_FOR_CMD_RESPONSE PMC_BIT32(5)
476#define INTRS_CRITICAL_OP_IN_PROGRESS PMC_BIT32(6)
477#define INTRS_IO_DEBUG_ACK PMC_BIT32(7)
478#define INTRS_IOARRIN_LOST PMC_BIT32(27)
479#define INTRS_SYSTEM_BUS_MMIO_ERROR PMC_BIT32(28)
480#define INTRS_IOA_PROCESSOR_ERROR PMC_BIT32(29)
481#define INTRS_HRRQ_VALID PMC_BIT32(30)
482#define INTRS_OPERATIONAL_STATUS PMC_BIT32(0)
483
484/* Host to IOA Doorbells */
485#define DOORBELL_RUNTIME_RESET PMC_BIT32(1)
486#define DOORBELL_IOA_RESET_ALERT PMC_BIT32(7)
487#define DOORBELL_IOA_DEBUG_ALERT PMC_BIT32(9)
488#define DOORBELL_ENABLE_DESTRUCTIVE_DIAGS PMC_BIT32(8)
489#define DOORBELL_IOA_START_BIST PMC_BIT32(23)
490#define DOORBELL_RESET_IOA PMC_BIT32(31)
491
492/* Global interrupt mask register value */
493#define GLOBAL_INTERRUPT_MASK 0x4ULL
494
495#define PMCRAID_ERROR_INTERRUPTS (INTRS_IOARCB_TRANSFER_FAILED | \
496 INTRS_IOA_UNIT_CHECK | \
497 INTRS_NO_HRRQ_FOR_CMD_RESPONSE | \
498 INTRS_IOARRIN_LOST | \
499 INTRS_SYSTEM_BUS_MMIO_ERROR | \
500 INTRS_IOA_PROCESSOR_ERROR)
501
502#define PMCRAID_PCI_INTERRUPTS (PMCRAID_ERROR_INTERRUPTS | \
503 INTRS_HRRQ_VALID | \
504 INTRS_CRITICAL_OP_IN_PROGRESS |\
505 INTRS_TRANSITION_TO_OPERATIONAL)
506
507/* control_block, associated with each of the commands, contains the IOARCB,
508 * IOADLs and memory for the IOASA. An additional 3 * 16 bytes are allocated
509 * to support additional request parameters (of max size 48) for any command.
510 */
511struct pmcraid_control_block {
512 struct pmcraid_ioarcb ioarcb;
513 struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_EXTERNAL + 3];
514 struct pmcraid_ioasa ioasa;
515} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
516
517/* pmcraid_sglist - Scatter-gather list allocated for passthrough ioctls
518 */
519struct pmcraid_sglist {
520 u32 order;
521 u32 num_sg;
522 u32 num_dma_sg;
523 u32 buffer_len;
524 struct scatterlist scatterlist[1];
525};
526
527/* pmcraid_cmd - LLD representation of SCSI command */
528struct pmcraid_cmd {
529
530 /* Ptr and bus address of DMA-able control block for this command */
531 struct pmcraid_control_block *ioa_cb;
532 dma_addr_t ioa_cb_bus_addr;
533
534 /* sense buffer for REQUEST SENSE command if firmware is not sending
535 * auto sense data
536 */
537 dma_addr_t sense_buffer_dma;
538 dma_addr_t dma_handle;
539 u8 *sense_buffer;
540
541 /* pointer to mid layer structure of SCSI commands */
542 struct scsi_cmnd *scsi_cmd;
543
544 struct list_head free_list;
545 struct completion wait_for_completion;
546 struct timer_list timer; /* needed for internal commands */
547 u32 timeout; /* current timeout value */
548 u32 index; /* index into the command list */
549 u8 completion_req; /* for handling internal commands */
550 u8 release; /* for handling completions */
551
552 void (*cmd_done) (struct pmcraid_cmd *);
553 struct pmcraid_instance *drv_inst;
554
555 struct pmcraid_sglist *sglist; /* used for passthrough IOCTLs */
556
557 /* scratch used during reset sequence */
558 union {
559 unsigned long time_left;
560 struct pmcraid_resource_entry *res;
561 } u;
562};
563
564/*
565 * Interrupt registers of IOA
566 */
567struct pmcraid_interrupts {
568 void __iomem *ioa_host_interrupt_reg;
569 void __iomem *ioa_host_interrupt_clr_reg;
570 void __iomem *ioa_host_interrupt_mask_reg;
571 void __iomem *ioa_host_interrupt_mask_clr_reg;
572 void __iomem *global_interrupt_mask_reg;
573 void __iomem *host_ioa_interrupt_reg;
574 void __iomem *host_ioa_interrupt_clr_reg;
575};
576
577/* ISR parameters; the LLD allocates one per MSI-X vector (if enabled) */
578struct pmcraid_isr_param {
579 u8 hrrq_id; /* hrrq entry index */
580 u16 vector; /* allocated msi-x vector */
581 struct pmcraid_instance *drv_inst;
582};
583
584/* AEN message header sent as part of event data to applications */
585struct pmcraid_aen_msg {
586 u32 hostno;
587 u32 length;
588 u8 reserved[8];
589 u8 data[0];
590};
591
592struct pmcraid_hostrcb {
593 struct pmcraid_instance *drv_inst;
594 struct pmcraid_aen_msg *msg;
595 struct pmcraid_hcam_hdr *hcam; /* pointer to hcam buffer */
596 struct pmcraid_cmd *cmd; /* pointer to command block used */
597 dma_addr_t baddr; /* system address of hcam buffer */
598 atomic_t ignore; /* process HCAM response ? */
599};
600
601#define PMCRAID_AEN_HDR_SIZE sizeof(struct pmcraid_aen_msg)
602
603
604
605/*
606 * Per adapter structure maintained by LLD
607 */
608struct pmcraid_instance {
609 /* Array of allowed-to-be-exposed resources, initialized from
610 * Configuration Table, later updated with CCNs
611 */
612 struct pmcraid_resource_entry *res_entries;
613
614 struct list_head free_res_q; /* res_entries lists for easy lookup */
615 struct list_head used_res_q; /* List of to be exposed resources */
616 spinlock_t resource_lock; /* spinlock to protect resource list */
617
618 void __iomem *mapped_dma_addr;
619 void __iomem *ioa_status; /* Iomapped IOA status register */
620 void __iomem *mailbox; /* Iomapped mailbox register */
621 void __iomem *ioarrin; /* IOmapped IOARR IN register */
622
623 struct pmcraid_interrupts int_regs;
624 struct pmcraid_chip_details *chip_cfg;
625
626 /* HostRCBs needed for HCAM */
627 struct pmcraid_hostrcb ldn;
628 struct pmcraid_hostrcb ccn;
629
630
631 /* Bus address of start of HRRQ */
632 dma_addr_t hrrq_start_bus_addr[PMCRAID_NUM_MSIX_VECTORS];
633
634 /* Pointer to 1st entry of HRRQ */
635 __be32 *hrrq_start[PMCRAID_NUM_MSIX_VECTORS];
636
637 /* Pointer to last entry of HRRQ */
638 __be32 *hrrq_end[PMCRAID_NUM_MSIX_VECTORS];
639
640 /* Pointer to current pointer of hrrq */
641 __be32 *hrrq_curr[PMCRAID_NUM_MSIX_VECTORS];
642
643 /* Lock for HRRQ access */
644 spinlock_t hrrq_lock[PMCRAID_NUM_MSIX_VECTORS];
645
646 /* Expected toggle bit at host */
647 u8 host_toggle_bit[PMCRAID_NUM_MSIX_VECTORS];
648
649 /* Number of Reset IOA retries; IOA is marked dead if threshold is exceeded */
650 u8 ioa_reset_attempts;
651#define PMCRAID_RESET_ATTEMPTS 3
652
653 /* Wait Q for threads to wait for Reset IOA completion */
654 wait_queue_head_t reset_wait_q;
655 struct pmcraid_cmd *reset_cmd;
656
657 /* structures for supporting SIGIO based AEN. */
658 struct fasync_struct *aen_queue;
659 struct mutex aen_queue_lock; /* lock for aen subscribers list */
660 struct cdev cdev;
661
662 struct Scsi_Host *host; /* mid layer interface structure handle */
663 struct pci_dev *pdev; /* PCI device structure handle */
664
665 u8 current_log_level; /* default level for logging IOASC errors */
666
667 u8 num_hrrq; /* Number of interrupt vectors allocated */
668 dev_t dev; /* Major-Minor numbers for Char device */
669
670 /* Used as ISR handler argument */
671 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
672
673 /* configuration table */
674 struct pmcraid_config_table *cfg_table;
675 dma_addr_t cfg_table_bus_addr;
676
677 /* structures related to command blocks */
678 struct kmem_cache *cmd_cachep; /* cache for cmd blocks */
679 struct pci_pool *control_pool; /* pool for control blocks */
680 char cmd_pool_name[64]; /* name of cmd cache */
681 char ctl_pool_name[64]; /* name of control cache */
682
683 struct pmcraid_cmd *cmd_list[PMCRAID_MAX_CMD];
684
685 struct list_head free_cmd_pool;
686 struct list_head pending_cmd_pool;
687 spinlock_t free_pool_lock; /* free pool lock */
688 spinlock_t pending_pool_lock; /* pending pool lock */
689
690 /* No of IO commands pending with FW */
691 atomic_t outstanding_cmds;
692
693 /* should we add/delete resources to the mid-layer now? */
694 atomic_t expose_resources;
695
696 /* Tasklet to handle deferred processing */
697 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
698
699 /* Work-queue (Shared) for deferred reset processing */
700 struct work_struct worker_q;
701
702
703 u32 ioa_state:4; /* For IOA Reset sequence FSM */
704#define IOA_STATE_OPERATIONAL 0x0
705#define IOA_STATE_UNKNOWN 0x1
706#define IOA_STATE_DEAD 0x2
707#define IOA_STATE_IN_SOFT_RESET 0x3
708#define IOA_STATE_IN_HARD_RESET 0x4
709#define IOA_STATE_IN_RESET_ALERT 0x5
710#define IOA_STATE_IN_BRINGDOWN 0x6
711#define IOA_STATE_IN_BRINGUP 0x7
712
713 u32 ioa_reset_in_progress:1; /* true if IOA reset is in progress */
714 u32 ioa_hard_reset:1; /* TRUE if Hard Reset is needed */
715 u32 ioa_unit_check:1; /* Indicates Unit Check condition */
716 u32 ioa_bringdown:1; /* whether IOA needs to be brought down */
717 u32 force_ioa_reset:1; /* force adapter reset ? */
718 u32 reinit_cfg_table:1; /* reinit config table due to lost CCN */
719 u32 ioa_shutdown_type:2;/* shutdown type used during reset */
720#define SHUTDOWN_NONE 0x0
721#define SHUTDOWN_NORMAL 0x1
722#define SHUTDOWN_ABBREV 0x2
723
724};
725
726/* LLD maintained resource entry structure */
727struct pmcraid_resource_entry {
728 struct list_head queue; /* link to "to be exposed" resources */
729 struct pmcraid_config_table_entry cfg_entry;
730 struct scsi_device *scsi_dev; /* Link scsi_device structure */
731 atomic_t read_failures; /* count of failed READ commands */
732 atomic_t write_failures; /* count of failed WRITE commands */
733
734 /* To indicate add/delete/modify during CCN */
735 u8 change_detected;
736#define RES_CHANGE_ADD 0x1 /* add this to mid-layer */
737#define RES_CHANGE_DEL 0x2 /* remove this from mid-layer */
738
739 u8 reset_progress; /* Device is resetting */
740
741 /*
742 * When IOA asks for sync (i.e. IOASC = Not Ready, Sync Required), this
743 * flag will be set, mid layer will be asked to retry. In the next
744 * attempt, this flag will be checked in queuecommand() to set
745 * SYNC_COMPLETE flag in IOARCB (flag_0).
746 */
747 u8 sync_reqd;
748
749 /* target indicates the mapped target_id assigned to this resource if
750 * this is a VSET resource. For non-VSET resources this will be unused
751 * or zero
752 */
753 u8 target;
754};
755
756/* Data structures used in IOASC error code logging */
757struct pmcraid_ioasc_error {
758 u32 ioasc_code; /* IOASC code */
759 u8 log_level; /* default log level assignment. */
760 char *error_string;
761};
762
763/* Initial log_level assignments for various IOASCs */
764#define IOASC_LOG_LEVEL_NONE 0x0 /* no logging */
765#define IOASC_LOG_LEVEL_MUST 0x1 /* must log: all high-severity errors */
766#define IOASC_LOG_LEVEL_HARD 0x2 /* optional: low-severity errors */
767
768/* Error information maintained by LLD. LLD initializes the pmcraid_error_table
769 * statically.
770 */
771static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {
772 {0x01180600, IOASC_LOG_LEVEL_MUST,
773 "Recovered Error, soft media error, sector reassignment suggested"},
774 {0x015D0000, IOASC_LOG_LEVEL_MUST,
775 "Recovered Error, failure prediction thresold exceeded"},
776 {0x015D9200, IOASC_LOG_LEVEL_MUST,
777 "Recovered Error, soft Cache Card Battery error thresold"},
780 {0x02048000, IOASC_LOG_LEVEL_MUST,
781 "Not Ready, IOA Reset Required"},
782 {0x02408500, IOASC_LOG_LEVEL_MUST,
783 "Not Ready, IOA microcode download required"},
784 {0x03110B00, IOASC_LOG_LEVEL_MUST,
785 "Medium Error, data unreadable, reassignment suggested"},
786 {0x03110C00, IOASC_LOG_LEVEL_MUST,
787 "Medium Error, data unreadable do not reassign"},
788 {0x03310000, IOASC_LOG_LEVEL_MUST,
789 "Medium Error, media corrupted"},
790 {0x04050000, IOASC_LOG_LEVEL_MUST,
791 "Hardware Error, IOA can't communicate with device"},
792 {0x04080000, IOASC_LOG_LEVEL_MUST,
793 "Hardware Error, device bus error"},
794 {0x04080000, IOASC_LOG_LEVEL_MUST,
795 "Hardware Error, device bus is not functioning"},
796 {0x04118000, IOASC_LOG_LEVEL_MUST,
797 "Hardware Error, IOA reserved area data check"},
798 {0x04118100, IOASC_LOG_LEVEL_MUST,
799 "Hardware Error, IOA reserved area invalid data pattern"},
800 {0x04118200, IOASC_LOG_LEVEL_MUST,
801 "Hardware Error, IOA reserved area LRC error"},
802 {0x04320000, IOASC_LOG_LEVEL_MUST,
803 "Hardware Error, reassignment space exhausted"},
804 {0x04330000, IOASC_LOG_LEVEL_MUST,
805 "Hardware Error, data transfer underlength error"},
806 {0x04330000, IOASC_LOG_LEVEL_MUST,
807 "Hardware Error, data transfer overlength error"},
808 {0x04418000, IOASC_LOG_LEVEL_MUST,
809 "Hardware Error, PCI bus error"},
810 {0x04440000, IOASC_LOG_LEVEL_MUST,
811 "Hardware Error, device error"},
812 {0x04448300, IOASC_LOG_LEVEL_MUST,
813 "Hardware Error, undefined device response"},
814 {0x04448400, IOASC_LOG_LEVEL_MUST,
815 "Hardware Error, IOA microcode error"},
816 {0x04448600, IOASC_LOG_LEVEL_MUST,
817 "Hardware Error, IOA reset required"},
818 {0x04449200, IOASC_LOG_LEVEL_MUST,
819 "Hardware Error, hard Cache Fearuee Card Battery error"},
820 {0x0444A000, IOASC_LOG_LEVEL_MUST,
821 "Hardware Error, failed device altered"},
822 {0x0444A200, IOASC_LOG_LEVEL_MUST,
823 "Hardware Error, data check after reassignment"},
824 {0x0444A300, IOASC_LOG_LEVEL_MUST,
825 "Hardware Error, LRC error after reassignment"},
826 {0x044A0000, IOASC_LOG_LEVEL_MUST,
827 "Hardware Error, device bus error (msg/cmd phase)"},
828 {0x04670400, IOASC_LOG_LEVEL_MUST,
829 "Hardware Error, new device can't be used"},
830 {0x04678000, IOASC_LOG_LEVEL_MUST,
831 "Hardware Error, invalid multiadapter configuration"},
832 {0x04678100, IOASC_LOG_LEVEL_MUST,
833 "Hardware Error, incorrect connection between enclosures"},
834 {0x04678200, IOASC_LOG_LEVEL_MUST,
835 "Hardware Error, connections exceed IOA design limits"},
836 {0x04678300, IOASC_LOG_LEVEL_MUST,
837 "Hardware Error, incorrect multipath connection"},
838 {0x04679000, IOASC_LOG_LEVEL_MUST,
839 "Hardware Error, command to LUN failed"},
840 {0x064C8000, IOASC_LOG_LEVEL_HARD,
841 "Unit Attention, cache exists for missing/failed device"},
842 {0x06670100, IOASC_LOG_LEVEL_HARD,
843 "Unit Attention, incompatible exposed mode device"},
844 {0x06670600, IOASC_LOG_LEVEL_HARD,
845 "Unit Attention, attachment of logical unit failed"},
846 {0x06678000, IOASC_LOG_LEVEL_MUST,
847 "Unit Attention, cables exceed connective design limit"},
848 {0x06678300, IOASC_LOG_LEVEL_MUST,
849 "Unit Attention, incomplete multipath connection between" \
850 "IOA and enclosure"},
851 {0x06678400, IOASC_LOG_LEVEL_MUST,
852 "Unit Attention, incomplete multipath connection between" \
853 "device and enclosure"},
854 {0x06678500, IOASC_LOG_LEVEL_MUST,
855 "Unit Attention, incomplete multipath connection between" \
856 "IOA and remote IOA"},
857 {0x06678600, IOASC_LOG_LEVEL_HARD,
858 "Unit Attention, missing remote IOA"},
859 {0x06679100, IOASC_LOG_LEVEL_HARD,
860 "Unit Attention, enclosure doesn't support required multipath" \
861 "function"},
862 {0x06698200, IOASC_LOG_LEVEL_HARD,
863 "Unit Attention, corrupt array parity detected on device"},
864 {0x066B0200, IOASC_LOG_LEVEL_MUST,
865 "Unit Attention, array exposed"},
866 {0x066B8200, IOASC_LOG_LEVEL_HARD,
867 "Unit Attention, exposed array is still protected"},
868 {0x066B9200, IOASC_LOG_LEVEL_MUST,
869 "Unit Attention, Multipath redundancy level got worse"},
870 {0x07270000, IOASC_LOG_LEVEL_HARD,
871 "Data Protect, device is read/write protected by IOA"},
872 {0x07278000, IOASC_LOG_LEVEL_HARD,
873 "Data Protect, IOA doesn't support device attribute"},
874 {0x07278100, IOASC_LOG_LEVEL_HARD,
875 "Data Protect, NVRAM mirroring prohibited"},
876 {0x07278400, IOASC_LOG_LEVEL_MUST,
877 "Data Protect, array is short 2 or more devices"},
878 {0x07278600, IOASC_LOG_LEVEL_MUST,
879 "Data Protect, exposed array is short a required device"},
880 {0x07278700, IOASC_LOG_LEVEL_MUST,
881 "Data Protect, array members not at required addresses"},
882 {0x07278800, IOASC_LOG_LEVEL_MUST,
883 "Data Protect, exposed mode device resource address conflict"},
884 {0x07278900, IOASC_LOG_LEVEL_MUST,
885 "Data Protect, incorrect resource address of exposed mode device"},
886 {0x07278A00, IOASC_LOG_LEVEL_MUST,
887 "Data Protect, Array is missing a device and parity is out of sync"},
888 {0x07278B00, IOASC_LOG_LEVEL_MUST,
889 "Data Protect, maximum number of arrays already exist"},
890 {0x07278C00, IOASC_LOG_LEVEL_HARD,
891 "Data Protect, cannot locate cache data for device"},
892 {0x07278D00, IOASC_LOG_LEVEL_HARD,
893 "Data Protect, cache data exits for a changed device"},
894 {0x07279100, IOASC_LOG_LEVEL_MUST,
895 "Data Protect, detection of a device requiring format"},
896 {0x07279200, IOASC_LOG_LEVEL_MUST,
897 "Data Protect, IOA exceeds maximum number of devices"},
898 {0x07279600, IOASC_LOG_LEVEL_MUST,
899 "Data Protect, missing array, volume set is not functional"},
900 {0x07279700, IOASC_LOG_LEVEL_MUST,
901 "Data Protect, single device for a volume set"},
902 {0x07279800, IOASC_LOG_LEVEL_MUST,
903 "Data Protect, missing multiple devices for a volume set"},
904 {0x07279900, IOASC_LOG_LEVEL_HARD,
905 "Data Protect, maximum number of volument sets already exists"},
906 {0x07279A00, IOASC_LOG_LEVEL_MUST,
907 "Data Protect, other volume set problem"},
908};
909
910/* macros to help in debugging */
911#define pmcraid_err(...) \
912 printk(KERN_ERR "MaxRAID: "__VA_ARGS__)
913
914#define pmcraid_info(...) \
915 if (pmcraid_debug_log) \
916 printk(KERN_INFO "MaxRAID: "__VA_ARGS__)
917
918/* check if given command is a SCSI READ or SCSI WRITE command */
919#define SCSI_READ_CMD 0x1 /* any of SCSI READ commands */
920#define SCSI_WRITE_CMD 0x2 /* any of SCSI WRITE commands */
921#define SCSI_CMD_TYPE(opcode) \
922({ u8 op = opcode; u8 __type = 0;\
923 if (op == READ_6 || op == READ_10 || op == READ_12 || op == READ_16)\
924 __type = SCSI_READ_CMD;\
925 else if (op == WRITE_6 || op == WRITE_10 || op == WRITE_12 || \
926 op == WRITE_16)\
927 __type = SCSI_WRITE_CMD;\
928 __type;\
929})
930
931#define IS_SCSI_READ_WRITE(opcode) \
932({ u8 __type = SCSI_CMD_TYPE(opcode); \
933 (__type == SCSI_READ_CMD || __type == SCSI_WRITE_CMD) ? 1 : 0;\
934})
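/*
 * Usage sketch (illustrative): classifying a CDB opcode from a mid-layer
 * command, e.g. to decide which per-resource failure counter declared in
 * pmcraid_resource_entry to bump:
 *
 *	u8 op = scsi_cmd->cmnd[0];
 *
 *	if (IS_SCSI_READ_WRITE(op)) {
 *		if (SCSI_CMD_TYPE(op) == SCSI_READ_CMD)
 *			atomic_inc(&res->read_failures);
 *		else
 *			atomic_inc(&res->write_failures);
 *	}
 */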
935
936
937/*
938 * pmcraid_ioctl_header - definition of header structure that precedes all the
939 * buffers given as ioctl arguments.
940 *
941 * .signature : always ASCII string, "PMCRAID"
942 * .reserved : not used
943 * .buffer_length : length of the buffer following the header
944 */
945struct pmcraid_ioctl_header {
946 u8 signature[8];
947 u32 reserved;
948 u32 buffer_length;
949};
950
951#define PMCRAID_IOCTL_SIGNATURE "PMCRAID"
952
953
954/*
955 * pmcraid_event_details - defines AEN details that apps can retrieve from LLD
956 *
957 * .rcb_ccn - complete RCB of CCN
958 * .rcb_ldn - complete RCB of LDN
959 */
960struct pmcraid_event_details {
961 struct pmcraid_hcam_ccn rcb_ccn;
962 struct pmcraid_hcam_ldn rcb_ldn;
963};
964
965/*
966 * pmcraid_driver_ioctl_buffer - structure passed as argument to most of the
967 * PMC driver handled ioctls.
968 */
969struct pmcraid_driver_ioctl_buffer {
970 struct pmcraid_ioctl_header ioctl_header;
971 struct pmcraid_event_details event_details;
972};
973
974/*
975 * pmcraid_passthrough_ioctl_buffer - structure given as argument to
976 * passthrough (or firmware handled) IOCTL commands. Note that the ioarcb
977 * requires 32-byte alignment, so it is necessary to pack this structure to
978 * avoid any holes between ioctl_header and the passthrough buffer
979 *
980 * .ioctl_header : ioctl header
981 * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer
982 * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware
983 * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
984 * the transfer directions passed in ioarcb.flags0. Contents
985 * of this buffer are valid only when ioarcb.data_transfer_len
986 * is not zero.
987 */
988struct pmcraid_passthrough_ioctl_buffer {
989 struct pmcraid_ioctl_header ioctl_header;
990 struct pmcraid_ioarcb ioarcb;
991 struct pmcraid_ioasa ioasa;
992 u8 request_buffer[1];
993} __attribute__ ((packed));
994
995/*
996 * keys to differentiate between driver handled IOCTLs and passthrough
997 * IOCTLs passed to IOA. driver determines the ioctl type using macro
998 * _IOC_TYPE
999 */
1000#define PMCRAID_DRIVER_IOCTL 'D'
1001#define PMCRAID_PASSTHROUGH_IOCTL 'F'
1002
1003#define DRV_IOCTL(n, size) \
1004 _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
1005
1006#define FMW_IOCTL(n, size) \
1007 _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
1008
1009/*
1010 * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
1011 * This is to facilitate applications avoiding unnecessary memory allocations.
1012 * For example, most of driver handled ioctls do not require ioarcb, ioasa.
1013 */
1014#define _ARGSIZE(arg) (sizeof(struct pmcraid_ioctl_header) + sizeof(arg))
1015
1016/* Driver handled IOCTL command definitions */
1017
1018#define PMCRAID_IOCTL_RESET_ADAPTER \
1019 DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
1020
1021/* passthrough/firmware handled commands */
1022#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \
1023 FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
1024
1025#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \
1026 FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
1027
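/*
 * User-space usage sketch (illustrative, error handling elided): issuing the
 * driver-handled adapter-reset ioctl through the per-adapter char device
 * /dev/pmcsas<minor>. This assumes the driver validates the 8-byte header
 * signature before acting on the request.
 *
 *	struct pmcraid_ioctl_header hdr;
 *	int fd = open("/dev/pmcsas0", O_RDWR);
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memcpy(hdr.signature, PMCRAID_IOCTL_SIGNATURE, sizeof(hdr.signature));
 *	hdr.buffer_length = 0;		(no payload follows the header)
 *	ioctl(fd, PMCRAID_IOCTL_RESET_ADAPTER, &hdr);
 *	close(fd);
 */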
1028
1029#endif /* _PMCRAID_H */
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 0f8796201504..fbcb82a2f7f4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1670,7 +1670,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 
 	qla24xx_vport_disable(fc_vport, disable);
 
-	if (ql2xmultique_tag) {
+	if (ha->flags.cpu_affinity_enabled) {
 		req = ha->req_q_map[1];
 		goto vport_queue;
 	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
@@ -1736,6 +1736,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 
 	qla24xx_deallocate_vp_id(vha);
 
+	mutex_lock(&ha->vport_lock);
+	ha->cur_vport_count--;
+	clear_bit(vha->vp_idx, ha->vp_idx_map);
+	mutex_unlock(&ha->vport_lock);
+
 	if (vha->timer_active) {
 		qla2x00_vp_stop_timer(vha);
 		DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
@@ -1743,7 +1748,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1743 vha->host_no, vha->vp_idx, vha)); 1748 vha->host_no, vha->vp_idx, vha));
1744 } 1749 }
1745 1750
1746 if (vha->req->id && !ql2xmultique_tag) { 1751 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1747 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) 1752 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1748 qla_printk(KERN_WARNING, ha, 1753 qla_printk(KERN_WARNING, ha,
1749 "Queue delete failed.\n"); 1754 "Queue delete failed.\n");
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 00aa48d975a6..215061861794 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -189,6 +189,7 @@ struct req_que;
189 */ 189 */
190typedef struct srb { 190typedef struct srb {
191 struct fc_port *fcport; 191 struct fc_port *fcport;
192 uint32_t handle;
192 193
193 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 194 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
194 195
@@ -196,6 +197,8 @@ typedef struct srb {
196 197
197 uint32_t request_sense_length; 198 uint32_t request_sense_length;
198 uint8_t *request_sense_ptr; 199 uint8_t *request_sense_ptr;
200
201 void *ctx;
199} srb_t; 202} srb_t;
200 203
201/* 204/*
@@ -204,6 +207,28 @@ typedef struct srb {
204#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */ 207#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
205 208
206/* 209/*
210 * SRB extensions.
211 */
212struct srb_ctx {
213#define SRB_LOGIN_CMD 1
214#define SRB_LOGOUT_CMD 2
215 uint16_t type;
216 struct timer_list timer;
217
218 void (*free)(srb_t *sp);
219 void (*timeout)(srb_t *sp);
220};
221
222struct srb_logio {
223 struct srb_ctx ctx;
224
225#define SRB_LOGIN_RETRIED BIT_0
226#define SRB_LOGIN_COND_PLOGI BIT_1
227#define SRB_LOGIN_SKIP_PRLI BIT_2
228 uint16_t flags;
229};
230
231/*
207 * ISP I/O Register Set structure definitions. 232 * ISP I/O Register Set structure definitions.
208 */ 233 */
209struct device_reg_2xxx { 234struct device_reg_2xxx {
@@ -1482,7 +1507,7 @@ typedef union {
1482 uint8_t domain; 1507 uint8_t domain;
1483 uint8_t area; 1508 uint8_t area;
1484 uint8_t al_pa; 1509 uint8_t al_pa;
1485#elif __LITTLE_ENDIAN 1510#elif defined(__LITTLE_ENDIAN)
1486 uint8_t al_pa; 1511 uint8_t al_pa;
1487 uint8_t area; 1512 uint8_t area;
1488 uint8_t domain; 1513 uint8_t domain;
@@ -1565,6 +1590,7 @@ typedef struct fc_port {
1565#define FCF_FABRIC_DEVICE BIT_0 1590#define FCF_FABRIC_DEVICE BIT_0
1566#define FCF_LOGIN_NEEDED BIT_1 1591#define FCF_LOGIN_NEEDED BIT_1
1567#define FCF_TAPE_PRESENT BIT_2 1592#define FCF_TAPE_PRESENT BIT_2
1593#define FCF_FCP2_DEVICE BIT_3
1568 1594
1569/* No loop ID flag. */ 1595/* No loop ID flag. */
1570#define FC_NO_LOOP_ID 0x1000 1596#define FC_NO_LOOP_ID 0x1000
@@ -2093,6 +2119,10 @@ struct qla_msix_entry {
2093enum qla_work_type { 2119enum qla_work_type {
2094 QLA_EVT_AEN, 2120 QLA_EVT_AEN,
2095 QLA_EVT_IDC_ACK, 2121 QLA_EVT_IDC_ACK,
2122 QLA_EVT_ASYNC_LOGIN,
2123 QLA_EVT_ASYNC_LOGIN_DONE,
2124 QLA_EVT_ASYNC_LOGOUT,
2125 QLA_EVT_ASYNC_LOGOUT_DONE,
2096}; 2126};
2097 2127
2098 2128
@@ -2111,6 +2141,11 @@ struct qla_work_evt {
2111#define QLA_IDC_ACK_REGS 7 2141#define QLA_IDC_ACK_REGS 7
2112 uint16_t mb[QLA_IDC_ACK_REGS]; 2142 uint16_t mb[QLA_IDC_ACK_REGS];
2113 } idc_ack; 2143 } idc_ack;
2144 struct {
2145 struct fc_port *fcport;
2146#define QLA_LOGIO_LOGIN_RETRIED BIT_0
2147 u16 data[2];
2148 } logio;
2114 } u; 2149 } u;
2115}; 2150};
2116 2151
@@ -2224,6 +2259,7 @@ struct qla_hw_data {
2224 uint32_t chip_reset_done :1; 2259 uint32_t chip_reset_done :1;
2225 uint32_t port0 :1; 2260 uint32_t port0 :1;
2226 uint32_t running_gold_fw :1; 2261 uint32_t running_gold_fw :1;
2262 uint32_t cpu_affinity_enabled :1;
2227 } flags; 2263 } flags;
2228 2264
2229 /* This spinlock is used to protect "io transactions", you must 2265 /* This spinlock is used to protect "io transactions", you must
@@ -2350,6 +2386,7 @@ struct qla_hw_data {
2350 (ha)->flags.msix_enabled) 2386 (ha)->flags.msix_enabled)
2351#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha)) 2387#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
2352#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) 2388#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
2389#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
2353 2390
2354#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) 2391#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2355#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) 2392#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
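
The srb_ctx/srb_logio pair added above is a small type-tagged context scheme: the base struct carries the discriminator plus the free/timeout callbacks, and each specific context embeds it as its first member so a generic pointer can be handed around. A simplified, userspace-compilable sketch of the pattern (timer omitted; names mirror the driver):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct srb {
    void *ctx;                  /* points at a struct srb_ctx */
} srb_t;

struct srb_ctx {
    uint16_t type;              /* SRB_LOGIN_CMD / SRB_LOGOUT_CMD */
    void (*free)(srb_t *sp);
    void (*timeout)(srb_t *sp);
};

struct srb_logio {
    struct srb_ctx ctx;         /* must be first: allows the generic cast */
    uint16_t flags;
};

enum { SRB_LOGIN_CMD = 1, SRB_LOGOUT_CMD = 2 };

static void logio_free(srb_t *sp)
{
    free(sp->ctx);
    free(sp);
}

static void logio_timeout(srb_t *sp)
{
    struct srb_ctx *ctx = sp->ctx;

    printf("async-%s timed out\n",
           ctx->type == SRB_LOGIN_CMD ? "login" : "logout");
}

int main(void)
{
    srb_t *sp = calloc(1, sizeof(*sp));
    struct srb_logio *lio = calloc(1, sizeof(*lio));

    if (!sp || !lio)
        return 1;
    lio->ctx.type = SRB_LOGIN_CMD;
    lio->ctx.free = logio_free;
    lio->ctx.timeout = logio_timeout;
    sp->ctx = lio;

    lio->ctx.timeout(sp);       /* dispatch via the base context */
    lio->ctx.free(sp);
    return 0;
}
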
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index dfde2dd865cb..66a8da5d7d08 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1126,7 +1126,7 @@ struct vp_config_entry_24xx {
1126 uint16_t id; 1126 uint16_t id;
1127 uint16_t reserved_4; 1127 uint16_t reserved_4;
1128 uint16_t hopct; 1128 uint16_t hopct;
1129 uint8_t reserved_5; 1129 uint8_t reserved_5[2];
1130}; 1130};
1131 1131
1132#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */ 1132#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */
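
The widened reserved_5 field restores the intended size of vp_config_entry_24xx; firmware-defined structures must match their wire layout exactly. As a general defensive pattern (a hedged sketch with a made-up structure, not code from this driver), a compile-time assertion can pin such a structure to its wire size:

#include <stdint.h>

/* Hypothetical firmware-defined entry, pinned to its wire size at compile
 * time; shrinking or growing any reserved field breaks the build. */
struct fw_entry {
    uint8_t  entry_type;
    uint8_t  entry_count;
    uint16_t flags;
    uint16_t id;
    uint8_t  reserved[2];   /* explicit padding, part of the layout */
};

_Static_assert(sizeof(struct fw_entry) == 8, "fw_entry must be 8 bytes");

int main(void) { return 0; }
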
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 65b12d82867c..f3d1d1afa95b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -52,6 +52,14 @@ extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
52 52
53extern void qla84xx_put_chip(struct scsi_qla_host *); 53extern void qla84xx_put_chip(struct scsi_qla_host *);
54 54
55extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
56 uint16_t *);
57extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
58extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
59 uint16_t *);
60extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
61 uint16_t *);
62
55/* 63/*
56 * Global Data in qla_os.c source file. 64 * Global Data in qla_os.c source file.
57 */ 65 */
@@ -76,6 +84,15 @@ extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
76extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum 84extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
77 fc_host_event_code, u32); 85 fc_host_event_code, u32);
78extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *); 86extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *);
87extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
88 uint16_t *);
89extern int qla2x00_post_async_login_done_work(struct scsi_qla_host *,
90 fc_port_t *, uint16_t *);
91extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
92 uint16_t *);
93extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
94 fc_port_t *, uint16_t *);
95
79extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); 96extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
80 97
81extern void qla2x00_abort_fcport_cmds(fc_port_t *); 98extern void qla2x00_abort_fcport_cmds(fc_port_t *);
@@ -83,6 +100,8 @@ extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
83 struct qla_hw_data *); 100 struct qla_hw_data *);
84extern void qla2x00_free_host(struct scsi_qla_host *); 101extern void qla2x00_free_host(struct scsi_qla_host *);
85extern void qla2x00_relogin(struct scsi_qla_host *); 102extern void qla2x00_relogin(struct scsi_qla_host *);
103extern void qla2x00_do_work(struct scsi_qla_host *);
104
86/* 105/*
87 * Global Functions in qla_mid.c source file. 106 * Global Functions in qla_mid.c source file.
88 */ 107 */
@@ -135,6 +154,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
135 uint16_t, uint16_t, uint8_t); 154 uint16_t, uint16_t, uint8_t);
136int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, 155int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
137 uint16_t, uint16_t, uint8_t); 156 uint16_t, uint16_t, uint8_t);
157extern int qla2x00_start_sp(srb_t *);
138 158
139/* 159/*
140 * Global Function Prototypes in qla_mbx.c source file. 160 * Global Function Prototypes in qla_mbx.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 917534b9f221..4647015eba63 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1674,6 +1674,10 @@ int
1674qla2x00_fdmi_register(scsi_qla_host_t *vha) 1674qla2x00_fdmi_register(scsi_qla_host_t *vha)
1675{ 1675{
1676 int rval; 1676 int rval;
1677 struct qla_hw_data *ha = vha->hw;
1678
1679 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1680 return QLA_FUNCTION_FAILED;
1677 1681
1678 rval = qla2x00_mgmt_svr_login(vha); 1682 rval = qla2x00_mgmt_svr_login(vha);
1679 if (rval) 1683 if (rval)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f2ce8e3cc91b..9e3eaac25596 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -40,6 +40,210 @@ static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
40static int qla84xx_init_chip(scsi_qla_host_t *); 40static int qla84xx_init_chip(scsi_qla_host_t *);
41static int qla25xx_init_queues(struct qla_hw_data *); 41static int qla25xx_init_queues(struct qla_hw_data *);
42 42
43/* SRB Extensions ---------------------------------------------------------- */
44
45static void
46qla2x00_ctx_sp_timeout(unsigned long __data)
47{
48 srb_t *sp = (srb_t *)__data;
49 struct srb_ctx *ctx;
50 fc_port_t *fcport = sp->fcport;
51 struct qla_hw_data *ha = fcport->vha->hw;
52 struct req_que *req;
53 unsigned long flags;
54
55 spin_lock_irqsave(&ha->hardware_lock, flags);
56 req = ha->req_q_map[0];
57 req->outstanding_cmds[sp->handle] = NULL;
58 ctx = sp->ctx;
59 ctx->timeout(sp);
60 spin_unlock_irqrestore(&ha->hardware_lock, flags);
61
62 ctx->free(sp);
63}
64
65static void
66qla2x00_ctx_sp_free(srb_t *sp)
67{
68 struct srb_ctx *ctx = sp->ctx;
69
70 kfree(ctx);
71 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
72}
73
74inline srb_t *
75qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
76 unsigned long tmo)
77{
78 srb_t *sp;
79 struct qla_hw_data *ha = vha->hw;
80 struct srb_ctx *ctx;
81
82 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
83 if (!sp)
84 goto done;
85 ctx = kzalloc(size, GFP_KERNEL);
86 if (!ctx) {
87 mempool_free(sp, ha->srb_mempool);
88 goto done;
89 }
90
91 memset(sp, 0, sizeof(*sp));
92 sp->fcport = fcport;
93 sp->ctx = ctx;
94 ctx->free = qla2x00_ctx_sp_free;
95
96 init_timer(&ctx->timer);
97 if (!tmo)
98 goto done;
99 ctx->timer.expires = jiffies + tmo * HZ;
100 ctx->timer.data = (unsigned long)sp;
101 ctx->timer.function = qla2x00_ctx_sp_timeout;
102 add_timer(&ctx->timer);
103done:
104 return sp;
105}
106
107/* Asynchronous Login/Logout Routines -------------------------------------- */
108
109#define ELS_TMO_2_RATOV(ha) ((ha)->r_a_tov / 10 * 2)
110
111static void
112qla2x00_async_logio_timeout(srb_t *sp)
113{
114 fc_port_t *fcport = sp->fcport;
115 struct srb_logio *lio = sp->ctx;
116
117 DEBUG2(printk(KERN_WARNING
118 "scsi(%ld:%x): Async-%s timeout.\n",
119 fcport->vha->host_no, sp->handle,
120 lio->ctx.type == SRB_LOGIN_CMD ? "login": "logout"));
121
122 if (lio->ctx.type == SRB_LOGIN_CMD)
123 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
124}
125
126int
127qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
128 uint16_t *data)
129{
130 struct qla_hw_data *ha = vha->hw;
131 srb_t *sp;
132 struct srb_logio *lio;
133 int rval;
134
135 rval = QLA_FUNCTION_FAILED;
136 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
137 ELS_TMO_2_RATOV(ha) + 2);
138 if (!sp)
139 goto done;
140
141 lio = sp->ctx;
142 lio->ctx.type = SRB_LOGIN_CMD;
143 lio->ctx.timeout = qla2x00_async_logio_timeout;
144 lio->flags |= SRB_LOGIN_COND_PLOGI;
145 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
146 lio->flags |= SRB_LOGIN_RETRIED;
147 rval = qla2x00_start_sp(sp);
148 if (rval != QLA_SUCCESS)
149 goto done_free_sp;
150
151 DEBUG2(printk(KERN_DEBUG
152 "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
153 "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
154 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
155 fcport->login_retry));
156 return rval;
157
158done_free_sp:
159 del_timer_sync(&lio->ctx.timer);
160 lio->ctx.free(sp);
161done:
162 return rval;
163}
164
165int
166qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
167{
168 struct qla_hw_data *ha = vha->hw;
169 srb_t *sp;
170 struct srb_logio *lio;
171 int rval;
172
173 rval = QLA_FUNCTION_FAILED;
174 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
175 ELS_TMO_2_RATOV(ha) + 2);
176 if (!sp)
177 goto done;
178
179 lio = sp->ctx;
180 lio->ctx.type = SRB_LOGOUT_CMD;
181 lio->ctx.timeout = qla2x00_async_logio_timeout;
182 rval = qla2x00_start_sp(sp);
183 if (rval != QLA_SUCCESS)
184 goto done_free_sp;
185
186 DEBUG2(printk(KERN_DEBUG
187 "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
188 fcport->vha->host_no, sp->handle, fcport->loop_id,
189 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
190 return rval;
191
192done_free_sp:
193 del_timer_sync(&lio->ctx.timer);
194 lio->ctx.free(sp);
195done:
196 return rval;
197}
198
199int
200qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
201 uint16_t *data)
202{
203 int rval;
204 uint8_t opts = 0;
205
206 switch (data[0]) {
207 case MBS_COMMAND_COMPLETE:
208 if (fcport->flags & FCF_TAPE_PRESENT)
209 opts |= BIT_1;
210 rval = qla2x00_get_port_database(vha, fcport, opts);
211 if (rval != QLA_SUCCESS)
212 qla2x00_mark_device_lost(vha, fcport, 1, 0);
213 else
214 qla2x00_update_fcport(vha, fcport);
215 break;
216 case MBS_COMMAND_ERROR:
217 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
218 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
219 else
220 qla2x00_mark_device_lost(vha, fcport, 1, 0);
221 break;
222 case MBS_PORT_ID_USED:
223 fcport->loop_id = data[1];
224 qla2x00_post_async_login_work(vha, fcport, NULL);
225 break;
226 case MBS_LOOP_ID_USED:
227 fcport->loop_id++;
228 rval = qla2x00_find_new_loop_id(vha, fcport);
229 if (rval != QLA_SUCCESS) {
230 qla2x00_mark_device_lost(vha, fcport, 1, 0);
231 break;
232 }
233 qla2x00_post_async_login_work(vha, fcport, NULL);
234 break;
235 }
236 return QLA_SUCCESS;
237}
238
239int
240qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
241 uint16_t *data)
242{
243 qla2x00_mark_device_lost(vha, fcport, 1, 0);
244 return QLA_SUCCESS;
245}
246
43/****************************************************************************/ 247/****************************************************************************/
44/* QLogic ISP2x00 Hardware Support Functions. */ 248/* QLogic ISP2x00 Hardware Support Functions. */
45/****************************************************************************/ 249/****************************************************************************/
@@ -987,7 +1191,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
987 ha->phy_version); 1191 ha->phy_version);
988 if (rval != QLA_SUCCESS) 1192 if (rval != QLA_SUCCESS)
989 goto failed; 1193 goto failed;
990
991 ha->flags.npiv_supported = 0; 1194 ha->flags.npiv_supported = 0;
992 if (IS_QLA2XXX_MIDTYPE(ha) && 1195 if (IS_QLA2XXX_MIDTYPE(ha) &&
993 (ha->fw_attributes & BIT_2)) { 1196 (ha->fw_attributes & BIT_2)) {
@@ -1591,7 +1794,8 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1591 char *st, *en; 1794 char *st, *en;
1592 uint16_t index; 1795 uint16_t index;
1593 struct qla_hw_data *ha = vha->hw; 1796 struct qla_hw_data *ha = vha->hw;
1594 int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha); 1797 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
1798 !IS_QLA81XX(ha);
1595 1799
1596 if (memcmp(model, BINZERO, len) != 0) { 1800 if (memcmp(model, BINZERO, len) != 0) {
1597 strncpy(ha->model_number, model, len); 1801 strncpy(ha->model_number, model, len);
@@ -1978,7 +2182,7 @@ qla2x00_rport_del(void *data)
1978 struct fc_rport *rport; 2182 struct fc_rport *rport;
1979 2183
1980 spin_lock_irq(fcport->vha->host->host_lock); 2184 spin_lock_irq(fcport->vha->host->host_lock);
1981 rport = fcport->drport; 2185 rport = fcport->drport ? fcport->drport: fcport->rport;
1982 fcport->drport = NULL; 2186 fcport->drport = NULL;
1983 spin_unlock_irq(fcport->vha->host->host_lock); 2187 spin_unlock_irq(fcport->vha->host->host_lock);
1984 if (rport) 2188 if (rport)
@@ -2345,8 +2549,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2345 struct fc_rport *rport; 2549 struct fc_rport *rport;
2346 struct qla_hw_data *ha = vha->hw; 2550 struct qla_hw_data *ha = vha->hw;
2347 2551
2348 if (fcport->drport) 2552 qla2x00_rport_del(fcport);
2349 qla2x00_rport_del(fcport);
2350 2553
2351 rport_ids.node_name = wwn_to_u64(fcport->node_name); 2554 rport_ids.node_name = wwn_to_u64(fcport->node_name);
2352 rport_ids.port_name = wwn_to_u64(fcport->port_name); 2555 rport_ids.port_name = wwn_to_u64(fcport->port_name);
@@ -3039,6 +3242,12 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3039 rval = QLA_SUCCESS; 3242 rval = QLA_SUCCESS;
3040 retry = 0; 3243 retry = 0;
3041 3244
3245 if (IS_ALOGIO_CAPABLE(ha)) {
3246 rval = qla2x00_post_async_login_work(vha, fcport, NULL);
3247 if (!rval)
3248 return rval;
3249 }
3250
3042 rval = qla2x00_fabric_login(vha, fcport, next_loopid); 3251 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3043 if (rval == QLA_SUCCESS) { 3252 if (rval == QLA_SUCCESS) {
3044 /* Send an ADISC to tape devices.*/ 3253 /* Send an ADISC to tape devices.*/
@@ -3133,7 +3342,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3133 } else { 3342 } else {
3134 fcport->port_type = FCT_TARGET; 3343 fcport->port_type = FCT_TARGET;
3135 if (mb[1] & BIT_1) { 3344 if (mb[1] & BIT_1) {
3136 fcport->flags |= FCF_TAPE_PRESENT; 3345 fcport->flags |= FCF_FCP2_DEVICE;
3137 } 3346 }
3138 } 3347 }
3139 3348
@@ -3244,7 +3453,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3244 struct req_que *req; 3453 struct req_que *req;
3245 struct rsp_que *rsp; 3454 struct rsp_que *rsp;
3246 3455
3247 if (ql2xmultique_tag) 3456 if (vha->hw->flags.cpu_affinity_enabled)
3248 req = vha->hw->req_q_map[0]; 3457 req = vha->hw->req_q_map[0];
3249 else 3458 else
3250 req = vha->req; 3459 req = vha->req;
@@ -3286,15 +3495,17 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3286} 3495}
3287 3496
3288void 3497void
3289qla2x00_update_fcports(scsi_qla_host_t *vha) 3498qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3290{ 3499{
3291 fc_port_t *fcport; 3500 fc_port_t *fcport;
3501 struct scsi_qla_host *tvp, *vha;
3292 3502
3293 /* Go with deferred removal of rport references. */ 3503 /* Go with deferred removal of rport references. */
3294 list_for_each_entry(fcport, &vha->vp_fcports, list) 3504 list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
3295 if (fcport && fcport->drport && 3505 list_for_each_entry(fcport, &vha->vp_fcports, list)
3296 atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3506 if (fcport && fcport->drport &&
3297 qla2x00_rport_del(fcport); 3507 atomic_read(&fcport->state) != FCS_UNCONFIGURED)
3508 qla2x00_rport_del(fcport);
3298} 3509}
3299 3510
3300/* 3511/*
@@ -3331,8 +3542,6 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3331 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3542 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3332 atomic_set(&vha->loop_state, LOOP_DOWN); 3543 atomic_set(&vha->loop_state, LOOP_DOWN);
3333 qla2x00_mark_all_devices_lost(vha, 0); 3544 qla2x00_mark_all_devices_lost(vha, 0);
3334 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
3335 qla2x00_mark_all_devices_lost(vp, 0);
3336 } else { 3545 } else {
3337 if (!atomic_read(&vha->loop_down_timer)) 3546 if (!atomic_read(&vha->loop_down_timer))
3338 atomic_set(&vha->loop_down_timer, 3547 atomic_set(&vha->loop_down_timer,
@@ -4264,7 +4473,7 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
4264 return -EINVAL; 4473 return -EINVAL;
4265 4474
4266 rval = qla2x00_fw_ready(base_vha); 4475 rval = qla2x00_fw_ready(base_vha);
4267 if (ql2xmultique_tag) 4476 if (ha->flags.cpu_affinity_enabled)
4268 req = ha->req_q_map[0]; 4477 req = ha->req_q_map[0];
4269 else 4478 else
4270 req = vha->req; 4479 req = vha->req;
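
qla2x00_async_login_done() above is effectively a small state machine keyed on the mailbox completion status: success fetches the port database, a used port id retries with the reported loop id, a used loop id picks a fresh one, and errors either reschedule a relogin or mark the device lost. A standalone sketch of the dispatch, with illustrative stand-ins for the MBS_* values (the real constants live in the driver headers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's MBS_* mailbox status codes. */
enum mbs {
    MBS_COMMAND_COMPLETE = 0x4000,
    MBS_COMMAND_ERROR    = 0x4005,
    MBS_PORT_ID_USED     = 0x4007,
    MBS_LOOP_ID_USED     = 0x4008,
};

enum action {
    UPDATE_FCPORT,           /* fetch port database, bring port online */
    MARK_LOST_OR_RELOGIN,    /* device lost, or schedule a relogin */
    RETRY_LOGIN,             /* adopt the loop id reported by firmware */
    RETRY_WITH_NEW_LOOP_ID,  /* current loop id taken: pick another */
};

static enum action login_done(uint16_t status)
{
    switch (status) {
    case MBS_COMMAND_COMPLETE:
        return UPDATE_FCPORT;
    case MBS_PORT_ID_USED:
        return RETRY_LOGIN;
    case MBS_LOOP_ID_USED:
        return RETRY_WITH_NEW_LOOP_ID;
    case MBS_COMMAND_ERROR:
    default:
        return MARK_LOST_OR_RELOGIN;
    }
}

int main(void)
{
    printf("%d\n", login_done(MBS_PORT_ID_USED));
    return 0;
}
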
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 13396beae2ce..c5ccac0bef76 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -350,6 +350,7 @@ qla2x00_start_scsi(srb_t *sp)
350 /* Build command packet */ 350 /* Build command packet */
351 req->current_outstanding_cmd = handle; 351 req->current_outstanding_cmd = handle;
352 req->outstanding_cmds[handle] = sp; 352 req->outstanding_cmds[handle] = sp;
353 sp->handle = handle;
353 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 354 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
354 req->cnt -= req_cnt; 355 req->cnt -= req_cnt;
355 356
@@ -778,6 +779,7 @@ qla24xx_start_scsi(srb_t *sp)
778 /* Build command packet. */ 779 /* Build command packet. */
779 req->current_outstanding_cmd = handle; 780 req->current_outstanding_cmd = handle;
780 req->outstanding_cmds[handle] = sp; 781 req->outstanding_cmds[handle] = sp;
782 sp->handle = handle;
781 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 783 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
782 req->cnt -= req_cnt; 784 req->cnt -= req_cnt;
783 785
@@ -852,9 +854,211 @@ static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
852 struct qla_hw_data *ha = sp->fcport->vha->hw; 854 struct qla_hw_data *ha = sp->fcport->vha->hw;
853 int affinity = cmd->request->cpu; 855 int affinity = cmd->request->cpu;
854 856
855 if (ql2xmultique_tag && affinity >= 0 && 857 if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
856 affinity < ha->max_rsp_queues - 1) 858 affinity < ha->max_rsp_queues - 1)
857 *rsp = ha->rsp_q_map[affinity + 1]; 859 *rsp = ha->rsp_q_map[affinity + 1];
858 else 860 else
859 *rsp = ha->rsp_q_map[0]; 861 *rsp = ha->rsp_q_map[0];
860} 862}
863
864/* Generic Control-SRB manipulation functions. */
865
866static void *
867qla2x00_alloc_iocbs(srb_t *sp)
868{
869 scsi_qla_host_t *vha = sp->fcport->vha;
870 struct qla_hw_data *ha = vha->hw;
871 struct req_que *req = ha->req_q_map[0];
872 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
873 uint32_t index, handle;
874 request_t *pkt;
875 uint16_t cnt, req_cnt;
876
877 pkt = NULL;
878 req_cnt = 1;
879
880 /* Check for room in outstanding command list. */
881 handle = req->current_outstanding_cmd;
882 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
883 handle++;
884 if (handle == MAX_OUTSTANDING_COMMANDS)
885 handle = 1;
886 if (!req->outstanding_cmds[handle])
887 break;
888 }
889 if (index == MAX_OUTSTANDING_COMMANDS)
890 goto queuing_error;
891
892 /* Check for room on request queue. */
893 if (req->cnt < req_cnt) {
894 if (ha->mqenable)
895 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
896 else if (IS_FWI2_CAPABLE(ha))
897 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
898 else
899 cnt = qla2x00_debounce_register(
900 ISP_REQ_Q_OUT(ha, &reg->isp));
901
902 if (req->ring_index < cnt)
903 req->cnt = cnt - req->ring_index;
904 else
905 req->cnt = req->length -
906 (req->ring_index - cnt);
907 }
908 if (req->cnt < req_cnt)
909 goto queuing_error;
910
911 /* Prep packet */
912 req->current_outstanding_cmd = handle;
913 req->outstanding_cmds[handle] = sp;
914 req->cnt -= req_cnt;
915
916 pkt = req->ring_ptr;
917 memset(pkt, 0, REQUEST_ENTRY_SIZE);
918 pkt->entry_count = req_cnt;
919 pkt->handle = handle;
920 sp->handle = handle;
921
922queuing_error:
923 return pkt;
924}
925
926static void
927qla2x00_start_iocbs(srb_t *sp)
928{
929 struct qla_hw_data *ha = sp->fcport->vha->hw;
930 struct req_que *req = ha->req_q_map[0];
931 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
932 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
933
934 /* Adjust ring index. */
935 req->ring_index++;
936 if (req->ring_index == req->length) {
937 req->ring_index = 0;
938 req->ring_ptr = req->ring;
939 } else
940 req->ring_ptr++;
941
942 /* Set chip new ring index. */
943 if (ha->mqenable) {
944 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
945 RD_REG_DWORD(&ioreg->hccr);
946 } else if (IS_FWI2_CAPABLE(ha)) {
947 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
948 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
949 } else {
950 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
951 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
952 }
953}
954
955static void
956qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
957{
958 struct srb_logio *lio = sp->ctx;
959
960 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
961 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
962 if (lio->flags & SRB_LOGIN_COND_PLOGI)
963 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
964 if (lio->flags & SRB_LOGIN_SKIP_PRLI)
965 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
966 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
967 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
968 logio->port_id[1] = sp->fcport->d_id.b.area;
969 logio->port_id[2] = sp->fcport->d_id.b.domain;
970 logio->vp_index = sp->fcport->vp_idx;
971}
972
973static void
974qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
975{
976 struct qla_hw_data *ha = sp->fcport->vha->hw;
977 struct srb_logio *lio = sp->ctx;
978 uint16_t opts;
979
980 mbx->entry_type = MBX_IOCB_TYPE;
981 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
982 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
983 opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0: 0;
984 opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1: 0;
985 if (HAS_EXTENDED_IDS(ha)) {
986 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
987 mbx->mb10 = cpu_to_le16(opts);
988 } else {
989 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
990 }
991 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
992 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
993 sp->fcport->d_id.b.al_pa);
994 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
995}
996
997static void
998qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
999{
1000 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1001 logio->control_flags =
1002 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1003 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1004 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1005 logio->port_id[1] = sp->fcport->d_id.b.area;
1006 logio->port_id[2] = sp->fcport->d_id.b.domain;
1007 logio->vp_index = sp->fcport->vp_idx;
1008}
1009
1010static void
1011qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1012{
1013 struct qla_hw_data *ha = sp->fcport->vha->hw;
1014
1015 mbx->entry_type = MBX_IOCB_TYPE;
1016 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1017 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1018 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1019 cpu_to_le16(sp->fcport->loop_id):
1020 cpu_to_le16(sp->fcport->loop_id << 8);
1021 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1022 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1023 sp->fcport->d_id.b.al_pa);
1024 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1025 /* Implicit: mbx->mbx10 = 0. */
1026}
1027
1028int
1029qla2x00_start_sp(srb_t *sp)
1030{
1031 int rval;
1032 struct qla_hw_data *ha = sp->fcport->vha->hw;
1033 void *pkt;
1034 struct srb_ctx *ctx = sp->ctx;
1035 unsigned long flags;
1036
1037 rval = QLA_FUNCTION_FAILED;
1038 spin_lock_irqsave(&ha->hardware_lock, flags);
1039 pkt = qla2x00_alloc_iocbs(sp);
1040 if (!pkt)
1041 goto done;
1042
1043 rval = QLA_SUCCESS;
1044 switch (ctx->type) {
1045 case SRB_LOGIN_CMD:
1046 IS_FWI2_CAPABLE(ha) ?
1047 qla24xx_login_iocb(sp, pkt):
1048 qla2x00_login_iocb(sp, pkt);
1049 break;
1050 case SRB_LOGOUT_CMD:
1051 IS_FWI2_CAPABLE(ha) ?
1052 qla24xx_logout_iocb(sp, pkt):
1053 qla2x00_logout_iocb(sp, pkt);
1054 break;
1055 default:
1056 break;
1057 }
1058
1059 wmb();
1060 qla2x00_start_iocbs(sp);
1061done:
1062 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1063 return rval;
1064}
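
The free-space computation in qla2x00_alloc_iocbs() above is the classic circular-ring calculation: how much room lies between the driver's producer index and the chip's consumer index depends on which side has wrapped. A standalone sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Free entries on a circular request ring of `length` slots, given the
 * producer index (ring_index) and the chip's consumer index (out). */
static uint16_t ring_free_entries(uint16_t length, uint16_t ring_index,
                                  uint16_t out)
{
    if (ring_index < out)
        return out - ring_index;        /* consumer ahead: simple gap */
    return length - (ring_index - out); /* producer wrapped past consumer */
}

int main(void)
{
    /* 128-entry ring, producer at 120, consumer at 8: 16 entries free. */
    printf("%u\n", ring_free_entries(128, 120, 8));
    return 0;
}
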
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 245e7afb4c4d..b20a7169aac2 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -598,9 +598,54 @@ skip_rio:
598 break; 598 break;
599 599
600 case MBA_PORT_UPDATE: /* Port database update */ 600 case MBA_PORT_UPDATE: /* Port database update */
601 /* Only handle SCNs for our Vport index. */ 601 /*
602 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff)) 602 * Handle only global and vn-port update events
603 *
604 * Relevant inputs:
605 * mb[1] = N_Port handle of changed port
606 * OR 0xffff for global event
607 * mb[2] = New login state
608 * 7 = Port logged out
609 * mb[3] = LSB is vp_idx, 0xff = all vps
610 *
611 * Skip processing if:
612 * Event is global, vp_idx is NOT all vps,
613 * vp_idx does not match
614 * Event is not global, vp_idx does not match
615 */
616 if ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff)
617 || (mb[1] != 0xffff)) {
618 if (vha->vp_idx != (mb[3] & 0xff))
619 break;
620 }
621
622 /* Global event -- port logout or port unavailable. */
623 if (mb[1] == 0xffff && mb[2] == 0x7) {
624 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
625 vha->host_no));
626 DEBUG(printk(KERN_INFO
627 "scsi(%ld): Port unavailable %04x %04x %04x.\n",
628 vha->host_no, mb[1], mb[2], mb[3]));
629
630 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
631 atomic_set(&vha->loop_state, LOOP_DOWN);
632 atomic_set(&vha->loop_down_timer,
633 LOOP_DOWN_TIME);
634 vha->device_flags |= DFLG_NO_CABLE;
635 qla2x00_mark_all_devices_lost(vha, 1);
636 }
637
638 if (vha->vp_idx) {
639 atomic_set(&vha->vp_state, VP_FAILED);
640 fc_vport_set_state(vha->fc_vport,
641 FC_VPORT_FAILED);
642 qla2x00_mark_all_devices_lost(vha, 1);
643 }
644
645 vha->flags.management_server_logged_in = 0;
646 ha->link_data_rate = PORT_SPEED_UNKNOWN;
603 break; 647 break;
648 }
604 649
605 /* 650 /*
606 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 651 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
@@ -640,8 +685,9 @@ skip_rio:
640 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 685 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
641 break; 686 break;
642 /* Only handle SCNs for our Vport index. */ 687 /* Only handle SCNs for our Vport index. */
643 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff)) 688 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
644 break; 689 break;
690
645 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n", 691 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
646 vha->host_no)); 692 vha->host_no));
647 DEBUG(printk(KERN_INFO 693 DEBUG(printk(KERN_INFO
@@ -874,6 +920,249 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
874 } 920 }
875} 921}
876 922
923static srb_t *
924qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
925 struct req_que *req, void *iocb)
926{
927 struct qla_hw_data *ha = vha->hw;
928 sts_entry_t *pkt = iocb;
929 srb_t *sp = NULL;
930 uint16_t index;
931
932 index = LSW(pkt->handle);
933 if (index >= MAX_OUTSTANDING_COMMANDS) {
934 qla_printk(KERN_WARNING, ha,
935 "%s: Invalid completion handle (%x).\n", func, index);
936 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
937 goto done;
938 }
939 sp = req->outstanding_cmds[index];
940 if (!sp) {
941 qla_printk(KERN_WARNING, ha,
942 "%s: Invalid completion handle (%x) -- timed-out.\n", func,
943 index);
944 return sp;
945 }
946 if (sp->handle != index) {
947 qla_printk(KERN_WARNING, ha,
948 "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
949 index);
950 return NULL;
951 }
952 req->outstanding_cmds[index] = NULL;
953done:
954 return sp;
955}
956
957static void
958qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
959 struct mbx_entry *mbx)
960{
961 const char func[] = "MBX-IOCB";
962 const char *type;
963 struct qla_hw_data *ha = vha->hw;
964 fc_port_t *fcport;
965 srb_t *sp;
966 struct srb_logio *lio;
967 uint16_t data[2];
968
969 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
970 if (!sp)
971 return;
972
973 type = NULL;
974 lio = sp->ctx;
975 switch (lio->ctx.type) {
976 case SRB_LOGIN_CMD:
977 type = "login";
978 break;
979 case SRB_LOGOUT_CMD:
980 type = "logout";
981 break;
982 default:
983 qla_printk(KERN_WARNING, ha,
984 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
985 lio->ctx.type);
986 return;
987 }
988
989 del_timer(&lio->ctx.timer);
990 fcport = sp->fcport;
991
992 data[0] = data[1] = 0;
993 if (mbx->entry_status) {
994 DEBUG2(printk(KERN_WARNING
995 "scsi(%ld:%x): Async-%s error entry - entry-status=%x "
996 "status=%x state-flag=%x status-flags=%x.\n",
997 fcport->vha->host_no, sp->handle, type,
998 mbx->entry_status, le16_to_cpu(mbx->status),
999 le16_to_cpu(mbx->state_flags),
1000 le16_to_cpu(mbx->status_flags)));
1001 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
1002
1003 data[0] = MBS_COMMAND_ERROR;
1004 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1005 QLA_LOGIO_LOGIN_RETRIED: 0;
1006 goto done_post_logio_done_work;
1007 }
1008
1009 if (!mbx->status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1010 DEBUG2(printk(KERN_DEBUG
1011 "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n",
1012 fcport->vha->host_no, sp->handle, type,
1013 le16_to_cpu(mbx->mb1)));
1014
1015 data[0] = MBS_COMMAND_COMPLETE;
1016 if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1)
1017 fcport->flags |= FCF_FCP2_DEVICE;
1018
1019 goto done_post_logio_done_work;
1020 }
1021
1022 data[0] = le16_to_cpu(mbx->mb0);
1023 switch (data[0]) {
1024 case MBS_PORT_ID_USED:
1025 data[1] = le16_to_cpu(mbx->mb1);
1026 break;
1027 case MBS_LOOP_ID_USED:
1028 break;
1029 default:
1030 data[0] = MBS_COMMAND_ERROR;
1031 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1032 QLA_LOGIO_LOGIN_RETRIED: 0;
1033 break;
1034 }
1035
1036 DEBUG2(printk(KERN_WARNING
1037 "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x "
1038 "mb6=%x mb7=%x.\n",
1039 fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->status),
1040 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1041 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1042 le16_to_cpu(mbx->mb7)));
1043
1044done_post_logio_done_work:
1045 lio->ctx.type == SRB_LOGIN_CMD ?
1046 qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
1047 qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
1048
1049 lio->ctx.free(sp);
1050}
1051
1052static void
1053qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1054 struct logio_entry_24xx *logio)
1055{
1056 const char func[] = "LOGIO-IOCB";
1057 const char *type;
1058 struct qla_hw_data *ha = vha->hw;
1059 fc_port_t *fcport;
1060 srb_t *sp;
1061 struct srb_logio *lio;
1062 uint16_t data[2];
1063 uint32_t iop[2];
1064
1065 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1066 if (!sp)
1067 return;
1068
1069 type = NULL;
1070 lio = sp->ctx;
1071 switch (lio->ctx.type) {
1072 case SRB_LOGIN_CMD:
1073 type = "login";
1074 break;
1075 case SRB_LOGOUT_CMD:
1076 type = "logout";
1077 break;
1078 default:
1079 qla_printk(KERN_WARNING, ha,
1080 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1081 lio->ctx.type);
1082 return;
1083 }
1084
1085 del_timer(&lio->ctx.timer);
1086 fcport = sp->fcport;
1087
1088 data[0] = data[1] = 0;
1089 if (logio->entry_status) {
1090 DEBUG2(printk(KERN_WARNING
1091 "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
1092 fcport->vha->host_no, sp->handle, type,
1093 logio->entry_status));
1094 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
1095
1096 data[0] = MBS_COMMAND_ERROR;
1097 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1098 QLA_LOGIO_LOGIN_RETRIED: 0;
1099 goto done_post_logio_done_work;
1100 }
1101
1102 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1103 DEBUG2(printk(KERN_DEBUG
1104 "scsi(%ld:%x): Async-%s complete - iop0=%x.\n",
1105 fcport->vha->host_no, sp->handle, type,
1106 le32_to_cpu(logio->io_parameter[0])));
1107
1108 data[0] = MBS_COMMAND_COMPLETE;
1109 if (lio->ctx.type == SRB_LOGOUT_CMD)
1110 goto done_post_logio_done_work;
1111
1112 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1113 if (iop[0] & BIT_4) {
1114 fcport->port_type = FCT_TARGET;
1115 if (iop[0] & BIT_8)
1116 fcport->flags |= FCF_FCP2_DEVICE;
1117 }
1118 if (iop[0] & BIT_5)
1119 fcport->port_type = FCT_INITIATOR;
1120 if (logio->io_parameter[7] || logio->io_parameter[8])
1121 fcport->supported_classes |= FC_COS_CLASS2;
1122 if (logio->io_parameter[9] || logio->io_parameter[10])
1123 fcport->supported_classes |= FC_COS_CLASS3;
1124
1125 goto done_post_logio_done_work;
1126 }
1127
1128 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1129 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1130 switch (iop[0]) {
1131 case LSC_SCODE_PORTID_USED:
1132 data[0] = MBS_PORT_ID_USED;
1133 data[1] = LSW(iop[1]);
1134 break;
1135 case LSC_SCODE_NPORT_USED:
1136 data[0] = MBS_LOOP_ID_USED;
1137 break;
1138 case LSC_SCODE_CMD_FAILED:
1139 if ((iop[1] & 0xff) == 0x05) {
1140 data[0] = MBS_NOT_LOGGED_IN;
1141 break;
1142 }
1143 /* Fall through. */
1144 default:
1145 data[0] = MBS_COMMAND_ERROR;
1146 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1147 QLA_LOGIO_LOGIN_RETRIED: 0;
1148 break;
1149 }
1150
1151 DEBUG2(printk(KERN_WARNING
1152 "scsi(%ld:%x): Async-%s failed - comp=%x iop0=%x iop1=%x.\n",
1153 fcport->vha->host_no, sp->handle, type,
1154 le16_to_cpu(logio->comp_status),
1155 le32_to_cpu(logio->io_parameter[0]),
1156 le32_to_cpu(logio->io_parameter[1])));
1157
1158done_post_logio_done_work:
1159 lio->ctx.type == SRB_LOGIN_CMD ?
1160 qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
1161 qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
1162
1163 lio->ctx.free(sp);
1164}
1165
877/** 1166/**
878 * qla2x00_process_response_queue() - Process response queue entries. 1167 * qla2x00_process_response_queue() - Process response queue entries.
879 * @ha: SCSI driver HA context 1168 * @ha: SCSI driver HA context
@@ -935,6 +1224,9 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
935 case STATUS_CONT_TYPE: 1224 case STATUS_CONT_TYPE:
936 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 1225 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
937 break; 1226 break;
1227 case MBX_IOCB_TYPE:
1228 qla2x00_mbx_iocb_entry(vha, rsp->req,
1229 (struct mbx_entry *)pkt);
 break;
938 default: 1230 default:
939 /* Type Not Supported. */ 1231 /* Type Not Supported. */
940 DEBUG4(printk(KERN_WARNING 1232 DEBUG4(printk(KERN_WARNING
@@ -1223,6 +1515,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1223 cp->device->id, cp->device->lun, resid, 1515 cp->device->id, cp->device->lun, resid,
1224 scsi_bufflen(cp))); 1516 scsi_bufflen(cp)));
1225 1517
1518 scsi_set_resid(cp, resid);
1226 cp->result = DID_ERROR << 16; 1519 cp->result = DID_ERROR << 16;
1227 break; 1520 break;
1228 } 1521 }
@@ -1544,6 +1837,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1544 qla24xx_report_id_acquisition(vha, 1837 qla24xx_report_id_acquisition(vha,
1545 (struct vp_rpt_id_entry_24xx *)pkt); 1838 (struct vp_rpt_id_entry_24xx *)pkt);
1546 break; 1839 break;
1840 case LOGINOUT_PORT_IOCB_TYPE:
1841 qla24xx_logio_entry(vha, rsp->req,
1842 (struct logio_entry_24xx *)pkt);
1843 break;
1547 default: 1844 default:
1548 /* Type Not Supported. */ 1845 /* Type Not Supported. */
1549 DEBUG4(printk(KERN_WARNING 1846 DEBUG4(printk(KERN_WARNING
@@ -1723,8 +2020,10 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1723 2020
1724 vha = qla25xx_get_host(rsp); 2021 vha = qla25xx_get_host(rsp);
1725 qla24xx_process_response_queue(vha, rsp); 2022 qla24xx_process_response_queue(vha, rsp);
1726 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2023 if (!ha->mqenable) {
1727 2024 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2025 RD_REG_DWORD_RELAXED(&reg->hccr);
2026 }
1728 spin_unlock_irq(&ha->hardware_lock); 2027 spin_unlock_irq(&ha->hardware_lock);
1729 2028
1730 return IRQ_HANDLED; 2029 return IRQ_HANDLED;
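
qla2x00_get_sp_from_handle() above centralizes the defensive checks on firmware-supplied completion handles: bounds-check the index, reject slots already reclaimed by a timeout, reject a stored handle that disagrees with the completion, then clear the slot exactly once. A userspace sketch of the same validation steps:

#include <stdint.h>
#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 1024

struct sp { uint32_t handle; };

static struct sp *get_sp_from_handle(struct sp **outstanding, uint32_t handle)
{
    struct sp *sp;

    if (handle >= MAX_OUTSTANDING_COMMANDS) {
        fprintf(stderr, "invalid completion handle %x\n", handle);
        return NULL;            /* driver also schedules an ISP abort here */
    }
    sp = outstanding[handle];
    if (!sp)
        return NULL;            /* slot already reclaimed: timed out */
    if (sp->handle != handle)
        return NULL;            /* mismatch: stale or corrupt completion */
    outstanding[handle] = NULL; /* reclaim the slot exactly once */
    return sp;
}

int main(void)
{
    static struct sp *outstanding[MAX_OUTSTANDING_COMMANDS];
    struct sp cmd = { .handle = 7 };

    outstanding[7] = &cmd;
    printf("%p\n", (void *)get_sp_from_handle(outstanding, 7));
    printf("%p\n", (void *)get_sp_from_handle(outstanding, 7)); /* NULL */
    return 0;
}
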
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index fe69f3057671..b6202fe118ac 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1507,7 +1507,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1507 1507
1508 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1508 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1509 1509
1510 if (ql2xmultique_tag) 1510 if (ha->flags.cpu_affinity_enabled)
1511 req = ha->req_q_map[0]; 1511 req = ha->req_q_map[0];
1512 else 1512 else
1513 req = vha->req; 1513 req = vha->req;
@@ -2324,7 +2324,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2324 vha = fcport->vha; 2324 vha = fcport->vha;
2325 ha = vha->hw; 2325 ha = vha->hw;
2326 req = vha->req; 2326 req = vha->req;
2327 if (ql2xmultique_tag) 2327 if (ha->flags.cpu_affinity_enabled)
2328 rsp = ha->rsp_q_map[tag + 1]; 2328 rsp = ha->rsp_q_map[tag + 1];
2329 else 2329 else
2330 rsp = req->rsp; 2330 rsp = req->rsp;
@@ -2746,7 +2746,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2746 if (rptid_entry->format == 0) { 2746 if (rptid_entry->format == 0) {
2747 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d," 2747 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
2748 " number of VPs acquired %d\n", __func__, vha->host_no, 2748 " number of VPs acquired %d\n", __func__, vha->host_no,
2749 MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count))); 2749 MSB(le16_to_cpu(rptid_entry->vp_count)),
2750 LSB(le16_to_cpu(rptid_entry->vp_count))));
2750 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__, 2751 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
2751 rptid_entry->port_id[2], rptid_entry->port_id[1], 2752 rptid_entry->port_id[2], rptid_entry->port_id[1],
2752 rptid_entry->port_id[0])); 2753 rptid_entry->port_id[0]));
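
The vp_count fix above follows the usual rule for on-wire fields: convert to host order first, then split into bytes, otherwise the two halves come out swapped on big-endian hosts. A standalone sketch (MSB/LSB mirror the driver's macros; le16_to_host is a portable stand-in for le16_to_cpu):

#include <stdint.h>
#include <stdio.h>

#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
#define LSB(x) ((uint8_t)(x))

/* Reassemble a little-endian 16-bit field from its explicit bytes;
 * correct regardless of the host's byte order. */
static uint16_t le16_to_host(const uint8_t wire[2])
{
    return (uint16_t)(wire[0] | (wire[1] << 8));
}

int main(void)
{
    uint8_t vp_count_wire[2] = { 0x02, 0x01 };  /* 0x0102, little-endian */
    uint16_t vp_count = le16_to_host(vp_count_wire);

    /* convert first, then split into MSB/LSB */
    printf("VPs setup %u, VPs acquired %u\n",
           (unsigned)MSB(vp_count), (unsigned)LSB(vp_count));
    return 0;
}
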
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cd78c501803a..42b799abba57 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -42,7 +42,6 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
42 42
43 set_bit(vp_id, ha->vp_idx_map); 43 set_bit(vp_id, ha->vp_idx_map);
44 ha->num_vhosts++; 44 ha->num_vhosts++;
45 ha->cur_vport_count++;
46 vha->vp_idx = vp_id; 45 vha->vp_idx = vp_id;
47 list_add_tail(&vha->list, &ha->vp_list); 46 list_add_tail(&vha->list, &ha->vp_list);
48 mutex_unlock(&ha->vport_lock); 47 mutex_unlock(&ha->vport_lock);
@@ -58,7 +57,6 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
58 mutex_lock(&ha->vport_lock); 57 mutex_lock(&ha->vport_lock);
59 vp_id = vha->vp_idx; 58 vp_id = vha->vp_idx;
60 ha->num_vhosts--; 59 ha->num_vhosts--;
61 ha->cur_vport_count--;
62 clear_bit(vp_id, ha->vp_idx_map); 60 clear_bit(vp_id, ha->vp_idx_map);
63 list_del(&vha->list); 61 list_del(&vha->list);
64 mutex_unlock(&ha->vport_lock); 62 mutex_unlock(&ha->vport_lock);
@@ -235,7 +233,11 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
235 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 233 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
236 } 234 }
237 235
238 /* To exclusively reset vport, we need to log it out first.*/ 236 /*
237 * To exclusively reset vport, we need to log it out first. Note: this
238 * control_vp can fail if ISP reset is already issued, this is
239 * expected, as the vp would be already logged out due to ISP reset.
240 */
239 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 241 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
240 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); 242 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
241 243
@@ -247,18 +249,11 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
247static int 249static int
248qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 250qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
249{ 251{
250 struct qla_hw_data *ha = vha->hw; 252 qla2x00_do_work(vha);
251 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
252 253
253 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 254 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
254 /* VP acquired. complete port configuration */ 255 /* VP acquired. complete port configuration */
255 if (atomic_read(&base_vha->loop_state) == LOOP_READY) { 256 qla24xx_configure_vp(vha);
256 qla24xx_configure_vp(vha);
257 } else {
258 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
259 set_bit(VP_DPC_NEEDED, &base_vha->dpc_flags);
260 }
261
262 return 0; 257 return 0;
263 } 258 }
264 259
@@ -309,6 +304,9 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
309 304
310 clear_bit(VP_DPC_NEEDED, &vha->dpc_flags); 305 clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
311 306
307 if (!(ha->current_topology & ISP_CFG_F))
308 return;
309
312 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 310 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
313 if (vp->vp_idx) 311 if (vp->vp_idx)
314 ret = qla2x00_do_dpc_vp(vp); 312 ret = qla2x00_do_dpc_vp(vp);
@@ -413,6 +411,11 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
413 411
414 vha->flags.init_done = 1; 412 vha->flags.init_done = 1;
415 413
414 mutex_lock(&ha->vport_lock);
415 set_bit(vha->vp_idx, ha->vp_idx_map);
416 ha->cur_vport_count++;
417 mutex_unlock(&ha->vport_lock);
418
416 return vha; 419 return vha;
417 420
418create_vhost_failed: 421create_vhost_failed:
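
With this change the vport-id bitmap and cur_vport_count are updated under the same vport_lock, so the two can never be observed out of step. A minimal userspace sketch of that invariant (a pthread mutex standing in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long vp_idx_map;   /* bit n set = vport id n in use */
static int cur_vport_count;

/* Mark the id used and bump the count in one critical section. */
static void vport_register(int vp_idx)
{
    pthread_mutex_lock(&vport_lock);
    vp_idx_map |= 1ULL << vp_idx;
    cur_vport_count++;
    pthread_mutex_unlock(&vport_lock);
}

static void vport_unregister(int vp_idx)
{
    pthread_mutex_lock(&vport_lock);
    cur_vport_count--;
    vp_idx_map &= ~(1ULL << vp_idx);
    pthread_mutex_unlock(&vport_lock);
}

int main(void)
{
    vport_register(3);
    printf("count=%d map=%llx\n", cur_vport_count, vp_idx_map);
    vport_unregister(3);
    printf("count=%d map=%llx\n", cur_vport_count, vp_idx_map);
    return 0;
}
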
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f0396e79b6fa..b79fca7d461b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -287,9 +287,12 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
287 int ques, req, ret; 287 int ques, req, ret;
288 struct qla_hw_data *ha = vha->hw; 288 struct qla_hw_data *ha = vha->hw;
289 289
290 if (!(ha->fw_attributes & BIT_6)) {
291 qla_printk(KERN_INFO, ha,
292 "Firmware is not multi-queue capable\n");
293 goto fail;
294 }
290 if (ql2xmultique_tag) { 295 if (ql2xmultique_tag) {
291 /* CPU affinity mode */
292 ha->wq = create_workqueue("qla2xxx_wq");
293 /* create a request queue for IO */ 296 /* create a request queue for IO */
294 options |= BIT_7; 297 options |= BIT_7;
295 req = qla25xx_create_req_que(ha, options, 0, 0, -1, 298 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
@@ -299,6 +302,7 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
299 "Can't create request queue\n"); 302 "Can't create request queue\n");
300 goto fail; 303 goto fail;
301 } 304 }
305 ha->wq = create_workqueue("qla2xxx_wq");
302 vha->req = ha->req_q_map[req]; 306 vha->req = ha->req_q_map[req];
303 options |= BIT_1; 307 options |= BIT_1;
304 for (ques = 1; ques < ha->max_rsp_queues; ques++) { 308 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
@@ -309,6 +313,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
309 goto fail2; 313 goto fail2;
310 } 314 }
311 } 315 }
316 ha->flags.cpu_affinity_enabled = 1;
317
312 DEBUG2(qla_printk(KERN_INFO, ha, 318 DEBUG2(qla_printk(KERN_INFO, ha,
313 "CPU affinity mode enabled, no. of response" 319 "CPU affinity mode enabled, no. of response"
314 " queues:%d, no. of request queues:%d\n", 320 " queues:%d, no. of request queues:%d\n",
@@ -317,8 +323,13 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
317 return 0; 323 return 0;
318fail2: 324fail2:
319 qla25xx_delete_queues(vha); 325 qla25xx_delete_queues(vha);
326 destroy_workqueue(ha->wq);
327 ha->wq = NULL;
320fail: 328fail:
321 ha->mqenable = 0; 329 ha->mqenable = 0;
330 kfree(ha->req_q_map);
331 kfree(ha->rsp_q_map);
332 ha->max_req_queues = ha->max_rsp_queues = 1;
322 return 1; 333 return 1;
323} 334}
324 335
@@ -462,6 +473,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
462 sp->flags = 0; 473 sp->flags = 0;
463 CMD_SP(cmd) = (void *)sp; 474 CMD_SP(cmd) = (void *)sp;
464 cmd->scsi_done = done; 475 cmd->scsi_done = done;
476 sp->ctx = NULL;
465 477
466 return sp; 478 return sp;
467} 479}
@@ -556,11 +568,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
556 unsigned long wait_iter = ABORT_WAIT_ITER; 568 unsigned long wait_iter = ABORT_WAIT_ITER;
557 int ret = QLA_SUCCESS; 569 int ret = QLA_SUCCESS;
558 570
559 while (CMD_SP(cmd)) { 571 while (CMD_SP(cmd) && wait_iter--) {
560 msleep(ABORT_POLLING_PERIOD); 572 msleep(ABORT_POLLING_PERIOD);
561
562 if (--wait_iter)
563 break;
564 } 573 }
565 if (CMD_SP(cmd)) 574 if (CMD_SP(cmd))
566 ret = QLA_FUNCTION_FAILED; 575 ret = QLA_FUNCTION_FAILED;
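
The original loop broke out as soon as --wait_iter was non-zero, so it effectively polled only once; the fix makes the condition consume the whole iteration budget. A standalone sketch of the corrected idiom (the driver sleeps ABORT_POLLING_PERIOD ms between polls; the sleep is elided here):

#include <stdio.h>

/* Poll while the condition holds AND iterations remain. */
static int wait_for_clear(const volatile int *busy, unsigned long iters)
{
    while (*busy && iters--)
        ;                       /* driver sleeps between polls here */
    return *busy ? -1 : 0;      /* -1: still pending after full budget */
}

int main(void)
{
    int busy = 0;

    printf("%d\n", wait_for_clear(&busy, 10));  /* 0: already clear */
    busy = 1;
    printf("%d\n", wait_for_clear(&busy, 10));  /* -1: timed out */
    return 0;
}
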
@@ -698,6 +707,8 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
698 continue; 707 continue;
699 if (sp->fcport != fcport) 708 if (sp->fcport != fcport)
700 continue; 709 continue;
710 if (sp->ctx)
711 continue;
701 712
702 spin_unlock_irqrestore(&ha->hardware_lock, flags); 713 spin_unlock_irqrestore(&ha->hardware_lock, flags);
703 if (ha->isp_ops->abort_command(sp)) { 714 if (ha->isp_ops->abort_command(sp)) {
@@ -783,7 +794,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
783 794
784 if (sp == NULL) 795 if (sp == NULL)
785 continue; 796 continue;
786 797 if (sp->ctx)
798 continue;
787 if (sp->cmd != cmd) 799 if (sp->cmd != cmd)
788 continue; 800 continue;
789 801
@@ -848,7 +860,8 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
848 sp = req->outstanding_cmds[cnt]; 860 sp = req->outstanding_cmds[cnt];
849 if (!sp) 861 if (!sp)
850 continue; 862 continue;
851 863 if (sp->ctx)
864 continue;
852 if (vha->vp_idx != sp->fcport->vha->vp_idx) 865 if (vha->vp_idx != sp->fcport->vha->vp_idx)
853 continue; 866 continue;
854 match = 0; 867 match = 0;
@@ -1106,8 +1119,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1106 struct fc_port *fcport; 1119 struct fc_port *fcport;
1107 struct qla_hw_data *ha = vha->hw; 1120 struct qla_hw_data *ha = vha->hw;
1108 1121
1109 if (ha->flags.enable_lip_full_login && !vha->vp_idx && 1122 if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
1110 !IS_QLA81XX(ha)) {
1111 ret = qla2x00_full_login_lip(vha); 1123 ret = qla2x00_full_login_lip(vha);
1112 if (ret != QLA_SUCCESS) { 1124 if (ret != QLA_SUCCESS) {
1113 DEBUG2_3(printk("%s(%ld): failed: " 1125 DEBUG2_3(printk("%s(%ld): failed: "
@@ -1120,7 +1132,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1120 qla2x00_wait_for_loop_ready(vha); 1132 qla2x00_wait_for_loop_ready(vha);
1121 } 1133 }
1122 1134
1123 if (ha->flags.enable_lip_reset && !vha->vp_idx) { 1135 if (ha->flags.enable_lip_reset) {
1124 ret = qla2x00_lip_reset(vha); 1136 ret = qla2x00_lip_reset(vha);
1125 if (ret != QLA_SUCCESS) { 1137 if (ret != QLA_SUCCESS) {
1126 DEBUG2_3(printk("%s(%ld): failed: " 1138 DEBUG2_3(printk("%s(%ld): failed: "
@@ -1154,6 +1166,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1154 int que, cnt; 1166 int que, cnt;
1155 unsigned long flags; 1167 unsigned long flags;
1156 srb_t *sp; 1168 srb_t *sp;
1169 struct srb_ctx *ctx;
1157 struct qla_hw_data *ha = vha->hw; 1170 struct qla_hw_data *ha = vha->hw;
1158 struct req_que *req; 1171 struct req_que *req;
1159 1172
@@ -1166,8 +1179,14 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1166 sp = req->outstanding_cmds[cnt]; 1179 sp = req->outstanding_cmds[cnt];
1167 if (sp) { 1180 if (sp) {
1168 req->outstanding_cmds[cnt] = NULL; 1181 req->outstanding_cmds[cnt] = NULL;
1169 sp->cmd->result = res; 1182 if (!sp->ctx) {
1170 qla2x00_sp_compl(ha, sp); 1183 sp->cmd->result = res;
1184 qla2x00_sp_compl(ha, sp);
1185 } else {
1186 ctx = sp->ctx;
1187 del_timer_sync(&ctx->timer);
1188 ctx->free(sp);
1189 }
1171 } 1190 }
1172 } 1191 }
1173 } 1192 }
@@ -1193,6 +1212,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1193 scsi_qla_host_t *vha = shost_priv(sdev->host); 1212 scsi_qla_host_t *vha = shost_priv(sdev->host);
1194 struct qla_hw_data *ha = vha->hw; 1213 struct qla_hw_data *ha = vha->hw;
1195 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1214 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1215 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1196 struct req_que *req = vha->req; 1216 struct req_que *req = vha->req;
1197 1217
1198 if (sdev->tagged_supported) 1218 if (sdev->tagged_supported)
@@ -1201,6 +1221,8 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1201 scsi_deactivate_tcq(sdev, req->max_q_depth); 1221 scsi_deactivate_tcq(sdev, req->max_q_depth);
1202 1222
1203 rport->dev_loss_tmo = ha->port_down_retry_count; 1223 rport->dev_loss_tmo = ha->port_down_retry_count;
1224 if (sdev->type == TYPE_TAPE)
1225 fcport->flags |= FCF_TAPE_PRESENT;
1204 1226
1205 return 0; 1227 return 0;
1206} 1228}
@@ -1923,6 +1945,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1923 if (ret) 1945 if (ret)
1924 goto probe_init_failed; 1946 goto probe_init_failed;
1925 /* Alloc arrays of request and response ring ptrs */ 1947 /* Alloc arrays of request and response ring ptrs */
1948que_init:
1926 if (!qla2x00_alloc_queues(ha)) { 1949 if (!qla2x00_alloc_queues(ha)) {
1927 qla_printk(KERN_WARNING, ha, 1950 qla_printk(KERN_WARNING, ha,
1928 "[ERROR] Failed to allocate memory for queue" 1951 "[ERROR] Failed to allocate memory for queue"
@@ -1959,11 +1982,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1959 goto probe_failed; 1982 goto probe_failed;
1960 } 1983 }
1961 1984
1962 if (ha->mqenable) 1985 if (ha->mqenable) {
1963 if (qla25xx_setup_mode(base_vha)) 1986 if (qla25xx_setup_mode(base_vha)) {
1964 qla_printk(KERN_WARNING, ha, 1987 qla_printk(KERN_WARNING, ha,
1965 "Can't create queues, falling back to single" 1988 "Can't create queues, falling back to single"
1966 " queue mode\n"); 1989 " queue mode\n");
1990 goto que_init;
1991 }
1992 }
1967 1993
1968 if (ha->flags.running_gold_fw) 1994 if (ha->flags.running_gold_fw)
1969 goto skip_dpc; 1995 goto skip_dpc;
@@ -2155,17 +2181,19 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2155 int defer) 2181 int defer)
2156{ 2182{
2157 struct fc_rport *rport; 2183 struct fc_rport *rport;
2184 scsi_qla_host_t *base_vha;
2158 2185
2159 if (!fcport->rport) 2186 if (!fcport->rport)
2160 return; 2187 return;
2161 2188
2162 rport = fcport->rport; 2189 rport = fcport->rport;
2163 if (defer) { 2190 if (defer) {
2191 base_vha = pci_get_drvdata(vha->hw->pdev);
2164 spin_lock_irq(vha->host->host_lock); 2192 spin_lock_irq(vha->host->host_lock);
2165 fcport->drport = rport; 2193 fcport->drport = rport;
2166 spin_unlock_irq(vha->host->host_lock); 2194 spin_unlock_irq(vha->host->host_lock);
2167 set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags); 2195 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2168 qla2xxx_wake_dpc(vha); 2196 qla2xxx_wake_dpc(base_vha);
2169 } else 2197 } else
2170 fc_remote_port_delete(rport); 2198 fc_remote_port_delete(rport);
2171} 2199}
@@ -2237,8 +2265,9 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2237 fc_port_t *fcport; 2265 fc_port_t *fcport;
2238 2266
2239 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2267 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2240 if (vha->vp_idx != fcport->vp_idx) 2268 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
2241 continue; 2269 continue;
2270
2242 /* 2271 /*
2243 * No point in marking the device as lost, if the device is 2272 * No point in marking the device as lost, if the device is
2244 * already DEAD. 2273 * already DEAD.
@@ -2246,10 +2275,12 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2246 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 2275 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2247 continue; 2276 continue;
2248 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2277 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2249 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2278 if (defer)
2250 qla2x00_schedule_rport_del(vha, fcport, defer); 2279 qla2x00_schedule_rport_del(vha, fcport, defer);
2251 } else 2280 else if (vha->vp_idx == fcport->vp_idx)
2252 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2281 qla2x00_schedule_rport_del(vha, fcport, defer);
2282 }
2283 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2253 } 2284 }
2254} 2285}
2255 2286
@@ -2598,7 +2629,31 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
 	return qla2x00_post_work(vha, e);
 }
 
-static void
+#define qla2x00_post_async_work(name, type)	\
+int qla2x00_post_async_##name##_work(		\
+    struct scsi_qla_host *vha,			\
+    fc_port_t *fcport, uint16_t *data)		\
+{						\
+	struct qla_work_evt *e;			\
+						\
+	e = qla2x00_alloc_work(vha, type);	\
+	if (!e)					\
+		return QLA_FUNCTION_FAILED;	\
+						\
+	e->u.logio.fcport = fcport;		\
+	if (data) {				\
+		e->u.logio.data[0] = data[0];	\
+		e->u.logio.data[1] = data[1];	\
+	}					\
+	return qla2x00_post_work(vha, e);	\
+}
+
+qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
+qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
+qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
+qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
+
+void
 qla2x00_do_work(struct scsi_qla_host *vha)
 {
 	struct qla_work_evt *e, *tmp;
@@ -2620,6 +2675,21 @@ qla2x00_do_work(struct scsi_qla_host *vha)
 		case QLA_EVT_IDC_ACK:
 			qla81xx_idc_ack(vha, e->u.idc_ack.mb);
 			break;
+		case QLA_EVT_ASYNC_LOGIN:
+			qla2x00_async_login(vha, e->u.logio.fcport,
+			    e->u.logio.data);
+			break;
+		case QLA_EVT_ASYNC_LOGIN_DONE:
+			qla2x00_async_login_done(vha, e->u.logio.fcport,
+			    e->u.logio.data);
+			break;
+		case QLA_EVT_ASYNC_LOGOUT:
+			qla2x00_async_logout(vha, e->u.logio.fcport);
+			break;
+		case QLA_EVT_ASYNC_LOGOUT_DONE:
+			qla2x00_async_logout_done(vha, e->u.logio.fcport,
+			    e->u.logio.data);
+			break;
 		}
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
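
The qla2x00_post_async_work() macro above stamps out four identically-shaped posting functions (login, login_done, logout, logout_done) whose names and event types come from token pasting, and the work-queue dispatcher gains one case per generated event. A compilable userspace sketch of the same generation pattern, with a stub standing in for the driver's queue:

    #include <stdio.h>

    enum evt { EVT_LOGIN, EVT_LOGOUT };

    static int post_work(enum evt type)
    {
        printf("queued event %d\n", type);
        return 0;
    }

    /* one macro defines a family of functions differing only in name/type */
    #define DEFINE_POST_ASYNC(name, type)    \
    int post_async_##name##_work(void)       \
    {                                        \
        return post_work(type);              \
    }

    DEFINE_POST_ASYNC(login, EVT_LOGIN)
    DEFINE_POST_ASYNC(logout, EVT_LOGOUT)

    int main(void)
    {
        post_async_login_work();     /* expands to post_work(EVT_LOGIN) */
        post_async_logout_work();
        return 0;
    }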
@@ -2635,6 +2705,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
 	int status;
 	uint16_t next_loopid = 0;
 	struct qla_hw_data *ha = vha->hw;
+	uint16_t data[2];
 
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 	/*
@@ -2644,6 +2715,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
 		if (atomic_read(&fcport->state) !=
 		    FCS_ONLINE && fcport->login_retry) {
 
+			fcport->login_retry--;
 			if (fcport->flags & FCF_FABRIC_DEVICE) {
 				if (fcport->flags & FCF_TAPE_PRESENT)
 					ha->isp_ops->fabric_logout(vha,
@@ -2652,13 +2724,22 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
 					    fcport->d_id.b.area,
 					    fcport->d_id.b.al_pa);
 
-				status = qla2x00_fabric_login(vha, fcport,
-				    &next_loopid);
+				if (IS_ALOGIO_CAPABLE(ha)) {
+					data[0] = 0;
+					data[1] = QLA_LOGIO_LOGIN_RETRIED;
+					status = qla2x00_post_async_login_work(
+					    vha, fcport, data);
+					if (status == QLA_SUCCESS)
+						continue;
+					/* Attempt a retry. */
+					status = 1;
+				} else
+					status = qla2x00_fabric_login(vha,
+					    fcport, &next_loopid);
 			} else
 				status = qla2x00_local_device_login(vha,
 				    fcport);
 
-			fcport->login_retry--;
 			if (status == QLA_SUCCESS) {
 				fcport->old_loop_id = fcport->loop_id;
 
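
This relogin hunk does two related things: it moves the login_retry decrement ahead of the attempt, so the retry budget is consumed even when the async path leaves the loop with continue, and it prefers posting an asynchronous login, falling back to the synchronous fabric login when the work item cannot be queued. A compilable sketch of that try-async-then-fall-back shape, with stubs in place of the driver calls:

    #include <stdbool.h>
    #include <stdio.h>

    static int post_async_login(void)  { return -1; /* say the queue is full */ }
    static int synchronous_login(void) { return 0;  /* fallback succeeds */ }

    static bool relogin_once(bool async_capable)
    {
        int status;

        if (async_capable) {
            if (post_async_login() == 0)
                return true;    /* completion is reported later */
            /* fall through: attempt a synchronous retry */
        }
        status = synchronous_login();
        return status == 0;
    }

    int main(void)
    {
        printf("logged in: %d\n", relogin_once(true));
        return 0;
    }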
@@ -2831,6 +2912,9 @@ qla2x00_do_dpc(void *data)
 	 */
 	ha->dpc_active = 0;
 
+	/* Cleanup any residual CTX SRBs. */
+	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+
 	return 0;
 }
 
@@ -2971,6 +3055,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
 			sp = req->outstanding_cmds[index];
 			if (!sp)
 				continue;
+			if (sp->ctx)
+				continue;
 			sfcp = sp->fcport;
 			if (!(sfcp->flags & FCF_TAPE_PRESENT))
 				continue;
@@ -2987,8 +3073,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
 
 	/* if the loop has been down for 4 minutes, reinit adapter */
 	if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
-		if (!(vha->device_flags & DFLG_NO_CABLE) &&
-		    !vha->vp_idx) {
+		if (!(vha->device_flags & DFLG_NO_CABLE)) {
 			DEBUG(printk("scsi(%ld): Loop down - "
 			    "aborting ISP.\n",
 			    vha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 84369705a9ad..ac107a2c34a4 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.01-k4"
+#define QLA2XXX_VERSION      "8.03.01-k6"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	3
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 8025ee16588e..c196d55eae39 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -227,11 +227,11 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
 	case SCS_DATA_UNDERRUN:
 	case SCS_DATA_OVERRUN:
 		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
 		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
-			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
-			    "residual = 0x%x\n", ha->host_no,
-			    cmd->device->channel, cmd->device->id,
-			    cmd->device->lun, __func__, residual));
+			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun\n",
+			    ha->host_no,
+			    cmd->device->channel, cmd->device->id,
+			    cmd->device->lun, __func__));
 
 			cmd->result = DID_ERROR << 16;
 			break;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2de5f3ad640b..b6e03074cb8f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -994,7 +994,7 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
 	 * all the existing users tried this hard.
 	 */
 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
-				  len + 4, NULL, 30 * HZ, 3, NULL);
+				  len, NULL, 30 * HZ, 3, NULL);
 	if (result)
 		return result;
 
@@ -1021,13 +1021,14 @@ unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page)
 {
 	int i, result;
 	unsigned int len;
-	unsigned char *buf = kmalloc(259, GFP_KERNEL);
+	const unsigned int init_vpd_len = 255;
+	unsigned char *buf = kmalloc(init_vpd_len, GFP_KERNEL);
 
 	if (!buf)
 		return NULL;
 
 	/* Ask for all the pages supported by this device */
-	result = scsi_vpd_inquiry(sdev, buf, 0, 255);
+	result = scsi_vpd_inquiry(sdev, buf, 0, init_vpd_len);
 	if (result)
 		goto fail;
 
@@ -1050,12 +1051,12 @@ unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page)
 	 * Some pages are longer than 255 bytes. The actual length of
 	 * the page is returned in the header.
 	 */
-	len = (buf[2] << 8) | buf[3];
-	if (len <= 255)
+	len = ((buf[2] << 8) | buf[3]) + 4;
+	if (len <= init_vpd_len)
 		return buf;
 
 	kfree(buf);
-	buf = kmalloc(len + 4, GFP_KERNEL);
+	buf = kmalloc(len, GFP_KERNEL);
 	result = scsi_vpd_inquiry(sdev, buf, page, len);
 	if (result)
 		goto fail;
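
The net effect of these scsi.c hunks is to keep three quantities in agreement: the allocation size, the transfer length passed to scsi_execute_req(), and the total page length derived from the VPD header (bytes 2-3 hold the payload length, to which the 4-byte header is added). The old code mixed len and len + 4. A compilable userspace model of the corrected two-pass fetch, with a synthetic page in place of the INQUIRY:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define INIT_VPD_LEN 255

    /* pretend VPD page: 4-byte header plus a 300-byte payload */
    static unsigned char page[304];

    static int vpd_inquiry(unsigned char *buf, size_t len)
    {
        memcpy(buf, page, len < sizeof(page) ? len : sizeof(page));
        return 0;
    }

    int main(void)
    {
        unsigned char *buf = malloc(INIT_VPD_LEN);
        unsigned int len;

        page[2] = 300 >> 8;        /* big-endian payload length */
        page[3] = 300 & 0xff;

        if (!buf || vpd_inquiry(buf, INIT_VPD_LEN))
            return 1;
        len = ((buf[2] << 8) | buf[3]) + 4;    /* total = payload + header */
        if (len > INIT_VPD_LEN) {
            free(buf);
            buf = malloc(len);                 /* exact size, second pass */
            if (!buf || vpd_inquiry(buf, len))
                return 1;
        }
        printf("fetched %u bytes\n", len);
        free(buf);
        return 0;
    }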
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a1689353d7fd..877204daf549 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -382,9 +382,13 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
 		 * who knows? FIXME(eric)
 		 */
 		return SUCCESS;
+	case RESERVATION_CONFLICT:
+		/*
+		 * let issuer deal with this, it could be just fine
+		 */
+		return SUCCESS;
 	case BUSY:
 	case QUEUE_FULL:
-	case RESERVATION_CONFLICT:
 	default:
 		return FAILED;
 	}
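
The move above reclassifies RESERVATION_CONFLICT from an error-handler failure to a normal completion: a conflict is a valid, final answer from a device reserved by another initiator, so escalating recovery gains nothing. The shape of the change as a standalone function (the status and disposition names here are illustrative stand-ins, not the kernel's definitions):

    #include <stdio.h>

    enum disposition { DISP_SUCCESS, DISP_FAILED };
    enum status { ST_GOOD, ST_BUSY, ST_QUEUE_FULL, ST_RESERVATION_CONFLICT };

    static enum disposition eh_completed_normally(enum status s)
    {
        switch (s) {
        case ST_GOOD:
        case ST_RESERVATION_CONFLICT:    /* final answer; let the issuer decide */
            return DISP_SUCCESS;
        case ST_BUSY:
        case ST_QUEUE_FULL:
        default:
            return DISP_FAILED;
        }
    }

    int main(void)
    {
        printf("%d\n", eh_completed_normally(ST_RESERVATION_CONFLICT));
        return 0;
    }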
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f3c40898fc7d..662024d86949 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -896,6 +896,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				scsi_print_result(cmd);
 				if (driver_byte(result) & DRIVER_SENSE)
 					scsi_print_sense("", cmd);
+				scsi_print_command(cmd);
 			}
 			blk_end_request_all(req, -EIO);
 			scsi_next_command(cmd);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 292c02f810d0..b98885de6876 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -291,7 +291,7 @@ static void fc_scsi_scan_rport(struct work_struct *work);
 #define FC_STARGET_NUM_ATTRS	3
 #define FC_RPORT_NUM_ATTRS	10
 #define FC_VPORT_NUM_ATTRS	9
-#define FC_HOST_NUM_ATTRS	21
+#define FC_HOST_NUM_ATTRS	22
 
 struct fc_internal {
 	struct scsi_transport_template t;
@@ -3432,7 +3432,7 @@ fc_bsg_jobdone(struct fc_bsg_job *job)
 
 /**
  * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
- * @req: BSG request that holds the job to be destroyed
+ * @rq: BSG request that holds the job to be destroyed
  */
 static void fc_bsg_softirq_done(struct request *rq)
 {
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index b47240ca4b19..ad897df36615 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -36,6 +36,38 @@
 
 #define ISCSI_TRANSPORT_VERSION "2.0-870"
 
+static int dbg_session;
+module_param_named(debug_session, dbg_session, int,
+		   S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_session,
+		 "Turn on debugging for sessions in scsi_transport_iscsi "
+		 "module. Set to 1 to turn on, and zero to turn off. Default "
+		 "is off.");
+
+static int dbg_conn;
+module_param_named(debug_conn, dbg_conn, int,
+		   S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_conn,
+		 "Turn on debugging for connections in scsi_transport_iscsi "
+		 "module. Set to 1 to turn on, and zero to turn off. Default "
+		 "is off.");
+
+#define ISCSI_DBG_TRANS_SESSION(_session, dbg_fmt, arg...)		\
+	do {								\
+		if (dbg_session)					\
+			iscsi_cls_session_printk(KERN_INFO, _session,	\
+						 "%s: " dbg_fmt,	\
+						 __func__, ##arg);	\
+	} while (0);
+
+#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...)			\
+	do {								\
+		if (dbg_conn)						\
+			iscsi_cls_conn_printk(KERN_INFO, _conn,		\
+					      "%s: " dbg_fmt,		\
+					      __func__, ##arg);		\
+	} while (0);
+
 struct iscsi_internal {
 	struct scsi_transport_template t;
 	struct iscsi_transport *iscsi_transport;
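
A note on the do/while form in the macros just added: they end in "} while (0);" with a trailing semicolon, which defeats the usual do { } while (0) idiom. With the semicolon baked into the macro, using it as the body of an if/else can fail to compile, because the extra empty statement orphans the else. In miniature:

    #define DBG_BAD(msg)  do { (void)(msg); } while (0);    /* trailing ';' */
    #define DBG_GOOD(msg) do { (void)(msg); } while (0)

    void example(int cond)
    {
        if (cond)
            DBG_GOOD("ok");    /* the ';' belongs to the call site */
        else
            (void)0;
        /* replacing DBG_GOOD with DBG_BAD above would not compile:
         * its built-in ';' ends the if, orphaning the else */
    }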
@@ -377,6 +409,7 @@ static void iscsi_session_release(struct device *dev)
 
 	shost = iscsi_session_to_shost(session);
 	scsi_host_put(shost);
+	ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n");
 	kfree(session);
 }
 
@@ -441,6 +474,9 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
 		return 0;
 
 	session = iscsi_dev_to_session(dev);
+
+	ISCSI_DBG_TRANS_SESSION(session, "Scanning session\n");
+
 	shost = iscsi_session_to_shost(session);
 	ihost = shost->shost_data;
 
@@ -448,8 +484,7 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
 	spin_lock_irqsave(&session->lock, flags);
 	if (session->state != ISCSI_SESSION_LOGGED_IN) {
 		spin_unlock_irqrestore(&session->lock, flags);
-		mutex_unlock(&ihost->mutex);
-		return 0;
+		goto user_scan_exit;
 	}
 	id = session->target_id;
 	spin_unlock_irqrestore(&session->lock, flags);
@@ -462,7 +497,10 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
 			scsi_scan_target(&session->dev, 0, id,
 					 scan_data->lun, 1);
 	}
+
+user_scan_exit:
 	mutex_unlock(&ihost->mutex);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed session scan\n");
 	return 0;
 }
 
@@ -522,7 +560,9 @@ static void session_recovery_timedout(struct work_struct *work)
 	if (session->transport->session_recovery_timedout)
 		session->transport->session_recovery_timedout(session);
 
+	ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
 	scsi_target_unblock(&session->dev);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
 }
 
 static void __iscsi_unblock_session(struct work_struct *work)
@@ -534,6 +574,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
 	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n");
 	/*
 	 * The recovery and unblock work get run from the same workqueue,
 	 * so try to cancel it if it was going to run after this unblock.
@@ -553,6 +594,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
 		if (scsi_queue_work(shost, &session->scan_work))
 			atomic_inc(&ihost->nr_scans);
 	}
+	ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n");
 }
 
 /**
@@ -579,10 +621,12 @@ static void __iscsi_block_session(struct work_struct *work)
 					   block_work);
 	unsigned long flags;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n");
 	spin_lock_irqsave(&session->lock, flags);
 	session->state = ISCSI_SESSION_FAILED;
 	spin_unlock_irqrestore(&session->lock, flags);
 	scsi_target_block(&session->dev);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
 	queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
 			   session->recovery_tmo * HZ);
 }
@@ -602,6 +646,8 @@ static void __iscsi_unbind_session(struct work_struct *work)
 	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
+
 	/* Prevent new scans and make sure scanning is not in progress */
 	mutex_lock(&ihost->mutex);
 	spin_lock_irqsave(&session->lock, flags);
@@ -616,6 +662,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
 
 	scsi_remove_target(&session->dev);
 	iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
 }
 
 struct iscsi_cls_session *
@@ -647,6 +694,8 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
 	device_initialize(&session->dev);
 	if (dd_size)
 		session->dd_data = &session[1];
+
+	ISCSI_DBG_TRANS_SESSION(session, "Completed session allocation\n");
 	return session;
 }
 EXPORT_SYMBOL_GPL(iscsi_alloc_session);
@@ -712,6 +761,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 	spin_unlock_irqrestore(&sesslock, flags);
 
 	iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n");
 	return 0;
 
 release_host:
@@ -752,6 +802,7 @@ static void iscsi_conn_release(struct device *dev)
 	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
 	struct device *parent = conn->dev.parent;
 
+	ISCSI_DBG_TRANS_CONN(conn, "Releasing conn\n");
 	kfree(conn);
 	put_device(parent);
 }
@@ -774,6 +825,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
 	unsigned long flags;
 	int err;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Removing session\n");
+
 	spin_lock_irqsave(&sesslock, flags);
 	list_del(&session->sess_list);
 	spin_unlock_irqrestore(&sesslock, flags);
@@ -807,12 +860,15 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
 			"for session. Error %d.\n", err);
 
 	transport_unregister_device(&session->dev);
+
+	ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n");
 	device_del(&session->dev);
 }
 EXPORT_SYMBOL_GPL(iscsi_remove_session);
 
 void iscsi_free_session(struct iscsi_cls_session *session)
 {
+	ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
 	iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
 	put_device(&session->dev);
 }
@@ -828,6 +884,7 @@ EXPORT_SYMBOL_GPL(iscsi_free_session);
 int iscsi_destroy_session(struct iscsi_cls_session *session)
 {
 	iscsi_remove_session(session);
+	ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
 	iscsi_free_session(session);
 	return 0;
 }
@@ -885,6 +942,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
 	list_add(&conn->conn_list, &connlist);
 	conn->active = 1;
 	spin_unlock_irqrestore(&connlock, flags);
+
+	ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
 	return conn;
 
 release_parent_ref:
@@ -912,6 +971,7 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
 	spin_unlock_irqrestore(&connlock, flags);
 
 	transport_unregister_device(&conn->dev);
+	ISCSI_DBG_TRANS_CONN(conn, "Completing conn destruction\n");
 	device_unregister(&conn->dev);
 	return 0;
 }
@@ -1200,6 +1260,9 @@ int iscsi_session_event(struct iscsi_cls_session *session,
 				"Cannot notify userspace of session "
 				"event %u. Check iscsi daemon\n",
 				event);
+
+	ISCSI_DBG_TRANS_SESSION(session, "Completed handling event %d rc %d\n",
+				event, rc);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(iscsi_session_event);
@@ -1221,6 +1284,8 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
 	shost = iscsi_session_to_shost(session);
 	ev->r.c_session_ret.host_no = shost->host_no;
 	ev->r.c_session_ret.sid = session->sid;
+	ISCSI_DBG_TRANS_SESSION(session,
+				"Completed creating transport session\n");
 	return 0;
 }
 
@@ -1246,6 +1311,8 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 
 	ev->r.c_conn_ret.sid = session->sid;
 	ev->r.c_conn_ret.cid = conn->cid;
+
+	ISCSI_DBG_TRANS_CONN(conn, "Completed creating transport conn\n");
 	return 0;
 }
 
@@ -1258,8 +1325,10 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
 	if (!conn)
 		return -EINVAL;
 
+	ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
 	if (transport->destroy_conn)
 		transport->destroy_conn(conn);
+
 	return 0;
 }
 
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 0895d3c71b03..fd47cb1bee1b 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1692,10 +1692,6 @@ sas_attach_transport(struct sas_function_template *ft)
 	i->f = ft;
 
 	count = 0;
-	SETUP_PORT_ATTRIBUTE(num_phys);
-	i->host_attrs[count] = NULL;
-
-	count = 0;
 	SETUP_PHY_ATTRIBUTE(initiator_port_protocols);
 	SETUP_PHY_ATTRIBUTE(target_port_protocols);
 	SETUP_PHY_ATTRIBUTE(device_type);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b7b9fec67a98..a89c421dab51 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2021,6 +2021,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
 	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
 		  sdp->removable ? "removable " : "");
+	put_device(&sdkp->dev);
 }
 
 /**
@@ -2106,6 +2107,7 @@ static int sd_probe(struct device *dev)
 
 	get_device(&sdp->sdev_gendev);
 
+	get_device(&sdkp->dev);	/* prevent release before async_schedule */
 	async_schedule(sd_probe_async, sdkp);
 
 	return 0;
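
The paired get_device()/put_device() above closes a lifetime race: once sd_probe() returns, nothing would otherwise stop the sdkp object from being released before the scheduled sd_probe_async() runs, so a reference is taken at scheduling time and dropped at the end of the async work. A compilable userspace model of the pattern, with a plain counter and a direct call standing in for kref and async_schedule():

    #include <stdio.h>

    struct disk {
        int refs;
    };

    static void get_disk(struct disk *d) { d->refs++; }
    static void put_disk(struct disk *d)
    {
        if (--d->refs == 0)
            printf("released\n");
    }

    static void probe_async(struct disk *d)
    {
        printf("late probe work, refs=%d\n", d->refs);
        put_disk(d);    /* balances the get taken before scheduling */
    }

    int main(void)
    {
        struct disk d = { .refs = 1 };

        get_disk(&d);       /* prevent release before the deferred work */
        probe_async(&d);    /* stands in for async_schedule() */
        put_disk(&d);       /* original reference */
        return 0;
    }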
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 4f618f487356..55b034b72708 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -347,6 +347,97 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
 	return 0;
 }
 
+#define INIT_ALLOC_SIZE 32
+
+static void ses_enclosure_data_process(struct enclosure_device *edev,
+				       struct scsi_device *sdev,
+				       int create)
+{
+	u32 result;
+	unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL;
+	int i, j, page7_len, len, components;
+	struct ses_device *ses_dev = edev->scratch;
+	int types = ses_dev->page1[10];
+	unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
+
+	if (!hdr_buf)
+		goto simple_populate;
+
+	/* re-read page 10 */
+	if (ses_dev->page10)
+		ses_recv_diag(sdev, 10, ses_dev->page10, ses_dev->page10_len);
+	/* Page 7 for the descriptors is optional */
+	result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
+	if (result)
+		goto simple_populate;
+
+	page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
+	/* add 1 for trailing '\0' we'll use */
+	buf = kzalloc(len + 1, GFP_KERNEL);
+	if (!buf)
+		goto simple_populate;
+	result = ses_recv_diag(sdev, 7, buf, len);
+	if (result) {
+simple_populate:
+		kfree(buf);
+		buf = NULL;
+		desc_ptr = NULL;
+		len = 0;
+		page7_len = 0;
+	} else {
+		desc_ptr = buf + 8;
+		len = (desc_ptr[2] << 8) + desc_ptr[3];
+		/* skip past overall descriptor */
+		desc_ptr += len + 4;
+		if (ses_dev->page10)
+			addl_desc_ptr = ses_dev->page10 + 8;
+	}
+	type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+	components = 0;
+	for (i = 0; i < types; i++, type_ptr += 4) {
+		for (j = 0; j < type_ptr[1]; j++) {
+			char *name = NULL;
+			struct enclosure_component *ecomp;
+
+			if (desc_ptr) {
+				if (desc_ptr >= buf + page7_len) {
+					desc_ptr = NULL;
+				} else {
+					len = (desc_ptr[2] << 8) + desc_ptr[3];
+					desc_ptr += 4;
+					/* Add trailing zero - pushes into
+					 * reserved space */
+					desc_ptr[len] = '\0';
+					name = desc_ptr;
+				}
+			}
+			if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+			    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
+
+				if (create)
+					ecomp = enclosure_component_register(edev,
+							     components++,
+							     type_ptr[0],
+							     name);
+				else
+					ecomp = &edev->component[components++];
+
+				if (!IS_ERR(ecomp) && addl_desc_ptr)
+					ses_process_descriptor(ecomp,
+							       addl_desc_ptr);
+			}
+			if (desc_ptr)
+				desc_ptr += len;
+
+			if (addl_desc_ptr)
+				addl_desc_ptr += addl_desc_ptr[1] + 2;
+
+		}
+	}
+	kfree(buf);
+	kfree(hdr_buf);
+}
+
 static void ses_match_to_enclosure(struct enclosure_device *edev,
 				   struct scsi_device *sdev)
 {
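
The factored-out ses_enclosure_data_process() walks diagnostic page 7, where each element descriptor starts with a 4-byte header whose bytes 2-3 hold a big-endian payload length, and the parser bounds-checks against the page length before consuming each one. A minimal compilable model of that walk, with synthetic data in place of the SES page:

    #include <stdio.h>

    int main(void)
    {
        /* two descriptors: "ab" (len 2) and "xyz" (len 3) */
        unsigned char page[] = {
            0, 0, 0, 2, 'a', 'b',
            0, 0, 0, 3, 'x', 'y', 'z',
        };
        unsigned char *p = page, *end = page + sizeof(page);

        while (p < end) {
            unsigned int len = (p[2] << 8) + p[3];    /* big-endian length */
            p += 4;                                   /* skip the header */
            printf("descriptor: %.*s\n", (int)len, p);
            p += len;                                 /* next descriptor */
        }
        return 0;
    }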
@@ -361,6 +452,8 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 	if (!buf)
 		return;
 
+	ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+
 	vpd_len = ((buf[2] << 8) | buf[3]) + 4;
 
 	desc = buf + 4;
@@ -395,28 +488,26 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 	kfree(buf);
 }
 
-#define INIT_ALLOC_SIZE 32
-
 static int ses_intf_add(struct device *cdev,
 			struct class_interface *intf)
 {
 	struct scsi_device *sdev = to_scsi_device(cdev->parent);
 	struct scsi_device *tmp_sdev;
-	unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr = NULL,
-		*addl_desc_ptr = NULL;
+	unsigned char *buf = NULL, *hdr_buf, *type_ptr;
 	struct ses_device *ses_dev;
 	u32 result;
-	int i, j, types, len, page7_len = 0, components = 0;
+	int i, types, len, components = 0;
 	int err = -ENOMEM;
 	struct enclosure_device *edev;
 	struct ses_component *scomp = NULL;
 
 	if (!scsi_device_enclosure(sdev)) {
 		/* not an enclosure, but might be in one */
-		edev = enclosure_find(&sdev->host->shost_gendev);
-		if (edev) {
+		struct enclosure_device *prev = NULL;
+
+		while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
 			ses_match_to_enclosure(edev, sdev);
-			put_device(&edev->edev);
+			prev = edev;
 		}
 		return -ENODEV;
 	}
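
The loop above relies on enclosure_find() now taking a "previous" cursor, so a caller can visit every enclosure attached to one parent rather than just the first match. A generic userspace rendering of that cursor-style iteration over a fixed array (the kernel version additionally manages device references):

    #include <stddef.h>
    #include <stdio.h>

    static const char *enclosures[] = { "encl0", "encl1", "encl2" };

    static const char *find_next(const char *prev)
    {
        size_t n = sizeof(enclosures) / sizeof(enclosures[0]);
        size_t i = 0;

        if (prev)    /* resume one past the previous hit */
            while (i < n && enclosures[i++] != prev)
                ;
        return i < n ? enclosures[i] : NULL;
    }

    int main(void)
    {
        const char *e = NULL;

        while ((e = find_next(e)) != NULL)
            printf("%s\n", e);
        return 0;
    }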
@@ -500,6 +591,7 @@ static int ses_intf_add(struct device *cdev,
 		ses_dev->page10_len = len;
 		buf = NULL;
 	}
+	kfree(hdr_buf);
 
 	scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
 	if (!scomp)
@@ -516,72 +608,7 @@ static int ses_intf_add(struct device *cdev,
 	for (i = 0; i < components; i++)
 		edev->component[i].scratch = scomp + i;
 
-	/* Page 7 for the descriptors is optional */
-	result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
-	if (result)
-		goto simple_populate;
-
-	page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
-	/* add 1 for trailing '\0' we'll use */
-	buf = kzalloc(len + 1, GFP_KERNEL);
-	if (!buf)
-		goto simple_populate;
-	result = ses_recv_diag(sdev, 7, buf, len);
-	if (result) {
-simple_populate:
-		kfree(buf);
-		buf = NULL;
-		desc_ptr = NULL;
-		addl_desc_ptr = NULL;
-	} else {
-		desc_ptr = buf + 8;
-		len = (desc_ptr[2] << 8) + desc_ptr[3];
-		/* skip past overall descriptor */
-		desc_ptr += len + 4;
-		if (ses_dev->page10)
-			addl_desc_ptr = ses_dev->page10 + 8;
-	}
-	type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
-	components = 0;
-	for (i = 0; i < types; i++, type_ptr += 4) {
-		for (j = 0; j < type_ptr[1]; j++) {
-			char *name = NULL;
-			struct enclosure_component *ecomp;
-
-			if (desc_ptr) {
-				if (desc_ptr >= buf + page7_len) {
-					desc_ptr = NULL;
-				} else {
-					len = (desc_ptr[2] << 8) + desc_ptr[3];
-					desc_ptr += 4;
-					/* Add trailing zero - pushes into
-					 * reserved space */
-					desc_ptr[len] = '\0';
-					name = desc_ptr;
-				}
-			}
-			if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
-			    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
-
-				ecomp = enclosure_component_register(edev,
-						     components++,
-						     type_ptr[0],
-						     name);
-
-				if (!IS_ERR(ecomp) && addl_desc_ptr)
-					ses_process_descriptor(ecomp,
-							       addl_desc_ptr);
-			}
-			if (desc_ptr)
-				desc_ptr += len;
-
-			if (addl_desc_ptr)
-				addl_desc_ptr += addl_desc_ptr[1] + 2;
-
-		}
-	}
-	kfree(buf);
-	kfree(hdr_buf);
+	ses_enclosure_data_process(edev, sdev, 1);
 
 	/* see if there are any devices matching before
 	 * we found the enclosure */
@@ -615,17 +642,26 @@ static int ses_remove(struct device *dev)
 	return 0;
 }
 
-static void ses_intf_remove(struct device *cdev,
-			    struct class_interface *intf)
+static void ses_intf_remove_component(struct scsi_device *sdev)
+{
+	struct enclosure_device *edev, *prev = NULL;
+
+	while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
+		prev = edev;
+		if (!enclosure_remove_device(edev, &sdev->sdev_gendev))
+			break;
+	}
+	if (edev)
+		put_device(&edev->edev);
+}
+
+static void ses_intf_remove_enclosure(struct scsi_device *sdev)
 {
-	struct scsi_device *sdev = to_scsi_device(cdev->parent);
 	struct enclosure_device *edev;
 	struct ses_device *ses_dev;
 
-	if (!scsi_device_enclosure(sdev))
-		return;
-
-	edev = enclosure_find(cdev->parent);
+	/* exact match to this enclosure */
+	edev = enclosure_find(&sdev->sdev_gendev, NULL);
 	if (!edev)
 		return;
 
@@ -643,6 +679,17 @@ static void ses_intf_remove(struct device *cdev,
 	enclosure_unregister(edev);
 }
 
+static void ses_intf_remove(struct device *cdev,
+			    struct class_interface *intf)
+{
+	struct scsi_device *sdev = to_scsi_device(cdev->parent);
+
+	if (!scsi_device_enclosure(sdev))
+		ses_intf_remove_component(sdev);
+	else
+		ses_intf_remove_enclosure(sdev);
+}
+
 static struct class_interface ses_interface = {
 	.add_dev	= ses_intf_add,
 	.remove_dev	= ses_intf_remove,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9230402c45af..4968c4ced385 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1811,7 +1811,7 @@ retry:
 	return 0;
 out:
 	for (i = 0; i < k; i++)
-		__free_pages(schp->pages[k], order);
+		__free_pages(schp->pages[i], order);
 
 	if (--order >= 0)
 		goto retry;
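
The one-character fix above is the classic wrong-index cleanup bug: the unwind loop passed pages[k] (the failure index) to __free_pages() k times, instead of freeing each of pages[0..k-1] once. In miniature:

    #include <stdlib.h>

    static void unwind(void **pages, int k)
    {
        int i;

        for (i = 0; i < k; i++)
            free(pages[i]);    /* the broken version freed pages[k] here */
    }

    int main(void)
    {
        void *pages[3] = { malloc(8), malloc(8), malloc(8) };

        unwind(pages, 3);
        return 0;
    }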
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 8d2a95c4e5b5..09fa8861fc58 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -55,6 +55,7 @@ enum {
 	OIS	= 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
 	OIM	= 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */
 
+	YIOA_STATUS			= 0x00,
 	YH2I_INT			= 0x20,
 	YINT_EN				= 0x34,
 	YI2H_INT			= 0x9c,
@@ -108,6 +109,10 @@ enum {
 
 	SS_HEAD_HANDSHAKE		= 0x80,
 
+	SS_H2I_INT_RESET		= 0x100,
+
+	SS_MU_OPERATIONAL		= 0x80000000,
+
 	STEX_CDB_LENGTH			= 16,
 	STATUS_VAR_LEN			= 128,
 
@@ -884,7 +889,7 @@ static void stex_ss_mu_intr(struct st_hba *hba)
 		tag = (u16)value;
 		if (unlikely(tag >= hba->host->can_queue)) {
 			printk(KERN_WARNING DRV_NAME
-				"(%s): invalid tag\n", pci_name(hba->pdev));
+			       "(%s): invalid tag\n", pci_name(hba->pdev));
 			continue;
 		}
 
@@ -1040,16 +1045,27 @@ static int stex_ss_handshake(struct st_hba *hba)
 	void __iomem *base = hba->mmio_base;
 	struct st_msg_header *msg_h;
 	struct handshake_frame *h;
-	__le32 *scratch = hba->scratch;
+	__le32 *scratch;
 	u32 data;
 	unsigned long before;
 	int ret = 0;
 
-	h = (struct handshake_frame *)(hba->alloc_rq(hba));
-	msg_h = (struct st_msg_header *)h - 1;
+	before = jiffies;
+	while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
+		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+			printk(KERN_ERR DRV_NAME
+				"(%s): firmware not operational\n",
+				pci_name(hba->pdev));
+			return -1;
+		}
+		msleep(1);
+	}
+
+	msg_h = (struct st_msg_header *)hba->dma_mem;
 	msg_h->handle = cpu_to_le64(hba->dma_handle);
 	msg_h->flag = SS_HEAD_HANDSHAKE;
 
+	h = (struct handshake_frame *)(msg_h + 1);
 	h->rb_phy = cpu_to_le64(hba->dma_handle);
 	h->req_sz = cpu_to_le16(hba->rq_size);
 	h->req_cnt = cpu_to_le16(hba->rq_count+1);
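
The handshake hunk adds a readiness gate: poll a status word for the SS_MU_OPERATIONAL bit, sleeping briefly between reads, and give up after a deadline instead of handshaking with firmware that never came up. A compilable userspace sketch of that poll-with-timeout shape, with a plain variable standing in for the YIOA_STATUS MMIO register:

    #include <stdio.h>
    #include <time.h>

    #define MU_OPERATIONAL 0x80000000u
    #define MAX_DELAY_SEC  2

    static volatile unsigned int status_reg;    /* stands in for readl() */

    static int wait_operational(void)
    {
        time_t before = time(NULL);
        struct timespec pause = { 0, 1000000 };    /* ~1 ms, like msleep(1) */

        while ((status_reg & MU_OPERATIONAL) == 0) {
            if (time(NULL) > before + MAX_DELAY_SEC)
                return -1;    /* firmware never became operational */
            nanosleep(&pause, NULL);
        }
        return 0;
    }

    int main(void)
    {
        status_reg = MU_OPERATIONAL;    /* pretend firmware is up */
        printf("handshake %s\n", wait_operational() ? "aborted" : "can proceed");
        return 0;
    }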
@@ -1205,6 +1221,13 @@ static void stex_hard_reset(struct st_hba *hba)
 			hba->pdev->saved_config_space[i]);
 }
 
+static void stex_ss_reset(struct st_hba *hba)
+{
+	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
+	readl(hba->mmio_base + YH2I_INT);
+	ssleep(5);
+}
+
 static int stex_reset(struct scsi_cmnd *cmd)
 {
 	struct st_hba *hba;
@@ -1221,6 +1244,8 @@ static int stex_reset(struct scsi_cmnd *cmd)
 
 	if (hba->cardtype == st_shasta)
 		stex_hard_reset(hba);
+	else if (hba->cardtype == st_yel)
+		stex_ss_reset(hba);
 
 	if (hba->cardtype != st_yosemite) {
 		if (stex_handshake(hba)) {
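
stex_ss_reset() follows the usual write-then-read-back MMIO sequence: PCI writes are posted, so the readl() after the writel() forces the reset doorbell to actually reach the device before the driver starts its fixed settle delay. A compilable userspace model of the sequence, with stub register accessors rather than the kernel's I/O helpers:

    #include <stdio.h>
    #include <unistd.h>

    static unsigned int fake_doorbell;    /* stands in for the YH2I_INT register */

    static void reg_write(unsigned int v, volatile unsigned int *reg) { *reg = v; }
    static unsigned int reg_read(volatile unsigned int *reg) { return *reg; }

    static void ss_reset(void)
    {
        reg_write(0x100, &fake_doorbell);    /* SS_H2I_INT_RESET */
        (void)reg_read(&fake_doorbell);      /* read back: on PCI this flushes
                                                the posted write to the device */
        sleep(5);                            /* fixed settle time for firmware */
    }

    int main(void)
    {
        ss_reset();
        printf("reset sequence issued\n");
        return 0;
    }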