author	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-27 22:52:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-27 22:52:57 -0400
commit	426048313dfa7d65dbd2379b1665755511f9544f (patch)
tree	dc727b9e41eb3d9dfe8e68f14b027c776d8aba98 /drivers/scsi
parent	2a56d2220284b0e4dd8569fa475d7053f1c40a63 (diff)
parent	7ad20aa9d39a525542b0840ac38bfc77be831e19 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (60 commits)
  [SCSI] lpfc 8.3.24: Extend BSG infrastructure and add link diagnostics
  [SCSI] lpfc 8.3.24: Add resource extent support
  [SCSI] lpfc 8.3.24: Add request-firmware support
  [SCSI] lpfc 8.3.24: Add SR-IOV control
  [SCSI] lpfc 8.3.24: Extended hardware support and support dump images
  [SCSI] lpfc 8.3.24: Miscellaneous Fixes and Corrections
  [SCSI] libsas: Add option for SATA soft reset
  [SCSI] libsas: check dev->gone before submitting sata i/o
  [SCSI] libsas: fix/amend device gone notification in sas_deform_port()
  [SCSI] MAINTAINERS update for SCSI (new email address)
  [SCSI] Fix Ultrastor asm snippet
  [SCSI] osst: fix warning
  [SCSI] osst: wrong index used in inner loop
  [SCSI] aic94xx: world-writable sysfs update_bios file
  [SCSI] MAINTAINERS: Add drivers/target/ entry
  [SCSI] target: Convert TASK_ATTR to scsi_tcq.h definitions
  [SCSI] target: Convert REPORT_LUNs to use int_to_scsilun
  [SCSI] target: Fix task->task_execute_queue=1 clear bug + LUN_RESET OOPs
  [SCSI] target: Fix bug with task_sg chained transport_free_dev_tasks release
  [SCSI] target: Fix interrupt context bug with stats_lock and core_tmr_alloc_req
  ...
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/aic94xx/aic94xx_init.c	2
-rw-r--r--	drivers/scsi/bfa/bfa_ioc.c	4
-rw-r--r--	drivers/scsi/bfa/bfa_ioc.h	1
-rw-r--r--	drivers/scsi/bfa/bfa_ioc_cb.c	11
-rw-r--r--	drivers/scsi/bfa/bfa_ioc_ct.c	26
-rw-r--r--	drivers/scsi/bnx2i/bnx2i.h	16
-rw-r--r--	drivers/scsi/bnx2i/bnx2i_hwi.c	27
-rw-r--r--	drivers/scsi/bnx2i/bnx2i_init.c	2
-rw-r--r--	drivers/scsi/bnx2i/bnx2i_iscsi.c	13
-rw-r--r--	drivers/scsi/fcoe/fcoe.c	58
-rw-r--r--	drivers/scsi/fcoe/fcoe.h	10
-rw-r--r--	drivers/scsi/fcoe/fcoe_ctlr.c	133
-rw-r--r--	drivers/scsi/fcoe/fcoe_transport.c	40
-rw-r--r--	drivers/scsi/ipr.c	12
-rw-r--r--	drivers/scsi/libfc/fc_disc.c	1
-rw-r--r--	drivers/scsi/libfc/fc_exch.c	2
-rw-r--r--	drivers/scsi/libfc/fc_fcp.c	16
-rw-r--r--	drivers/scsi/libfc/fc_libfc.h	1
-rw-r--r--	drivers/scsi/libsas/sas_ata.c	60
-rw-r--r--	drivers/scsi/libsas/sas_internal.h	2
-rw-r--r--	drivers/scsi/libsas/sas_phy.c	4
-rw-r--r--	drivers/scsi/libsas/sas_port.c	21
-rw-r--r--	drivers/scsi/libsas/sas_scsi_host.c	14
-rw-r--r--	drivers/scsi/lpfc/lpfc.h	43
-rw-r--r--	drivers/scsi/lpfc/lpfc_attr.c	312
-rw-r--r--	drivers/scsi/lpfc/lpfc_bsg.c	2111
-rw-r--r--	drivers/scsi/lpfc/lpfc_bsg.h	87
-rw-r--r--	drivers/scsi/lpfc/lpfc_crtn.h	10
-rw-r--r--	drivers/scsi/lpfc/lpfc_ct.c	2
-rw-r--r--	drivers/scsi/lpfc/lpfc_debugfs.c	10
-rw-r--r--	drivers/scsi/lpfc/lpfc_els.c	28
-rw-r--r--	drivers/scsi/lpfc/lpfc_hbadisc.c	54
-rw-r--r--	drivers/scsi/lpfc/lpfc_hw.h	8
-rw-r--r--	drivers/scsi/lpfc/lpfc_hw4.h	501
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c	545
-rw-r--r--	drivers/scsi/lpfc/lpfc_mbox.c	166
-rw-r--r--	drivers/scsi/lpfc/lpfc_mem.c	13
-rw-r--r--	drivers/scsi/lpfc/lpfc_nportdisc.c	28
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c	56
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.c	1659
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.h	1
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli4.h	33
-rw-r--r--	drivers/scsi/lpfc/lpfc_vport.c	2
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas.h	10
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas_base.c	93
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas_fusion.c	83
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_base.h	4
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_scsih.c	102
-rw-r--r--	drivers/scsi/osst.c	6
-rw-r--r--	drivers/scsi/qla4xxx/Makefile	2
-rw-r--r--	drivers/scsi/qla4xxx/ql4_attr.c	69
-rw-r--r--	drivers/scsi/qla4xxx/ql4_def.h	11
-rw-r--r--	drivers/scsi/qla4xxx/ql4_fw.h	23
-rw-r--r--	drivers/scsi/qla4xxx/ql4_glbl.h	3
-rw-r--r--	drivers/scsi/qla4xxx/ql4_init.c	2
-rw-r--r--	drivers/scsi/qla4xxx/ql4_isr.c	22
-rw-r--r--	drivers/scsi/qla4xxx/ql4_mbx.c	77
-rw-r--r--	drivers/scsi/qla4xxx/ql4_nx.c	19
-rw-r--r--	drivers/scsi/qla4xxx/ql4_os.c	68
-rw-r--r--	drivers/scsi/qla4xxx/ql4_version.h	2
-rw-r--r--	drivers/scsi/scsi_error.c	87
-rw-r--r--	drivers/scsi/scsi_trace.c	4
-rw-r--r--	drivers/scsi/sd.c	82
-rw-r--r--	drivers/scsi/ultrastor.c	2
64 files changed, 5775 insertions, 1141 deletions
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 3b7e83d2dab4..d5ff142c93a2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
 			flash_error_table[i].reason);
 }
 
-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
 	asd_show_update_bios, asd_store_update_bios);
 
 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index c1f72c49196f..6c7e0339dda4 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -56,6 +56,8 @@ BFA_TRC_FILE(CNA, IOC);
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
 #define bfa_ioc_notify_fail(__ioc)		\
 			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc)		\
+			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
 #define bfa_ioc_sync_join(__ioc)		\
 			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
 #define bfa_ioc_sync_leave(__ioc)		\
@@ -647,7 +649,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
 		if (bfa_ioc_firmware_lock(ioc)) {
-			if (bfa_ioc_sync_complete(ioc)) {
+			if (bfa_ioc_sync_start(ioc)) {
 				iocpf->retry_count = 0;
 				bfa_ioc_sync_join(ioc);
 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index ec9cf08b0e7f..c85182a704fb 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -263,6 +263,7 @@ struct bfa_ioc_hwif_s {
 					bfa_boolean_t msix);
 	void		(*ioc_notify_fail)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_ownership_reset)	(struct bfa_ioc_s *ioc);
+	bfa_boolean_t	(*ioc_sync_start)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_sync_join)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_sync_leave)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_sync_ack)		(struct bfa_ioc_s *ioc);
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index e4a0713185b6..89ae4c8f95a2 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -32,6 +32,7 @@ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
 static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
@@ -53,6 +54,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
 	hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
 	hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
 	hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+	hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
 	hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
 	hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
 	hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
@@ -195,6 +197,15 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
 }
 
 /*
+ * Synchronized IOC failure processing routines
+ */
+static bfa_boolean_t
+bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
+{
+	return bfa_ioc_cb_sync_complete(ioc);
+}
+
+/*
  * Cleanup hw semaphore and usecnt registers
  */
 static void
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 008d129ddfcd..93612520f0d2 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
 static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
@@ -62,6 +63,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
 	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
 	hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
 	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
 	hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
 	hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
 	hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -351,6 +353,30 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 	writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
 
+static bfa_boolean_t
+bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+	/*
+	 * Driver load time.  If the sync required bit for this PCI fn
+	 * is set, it is due to an unclean exit by the driver for this
+	 * PCI fn in the previous incarnation. Whoever comes here first
+	 * should clean it up, no matter which PCI fn.
+	 */
+
+	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+		writel(0, ioc->ioc_regs.ioc_fail_sync);
+		writel(1, ioc->ioc_regs.ioc_usage_reg);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+		return BFA_TRUE;
+	}
+
+	return bfa_ioc_ct_sync_complete(ioc);
+}
+
 /*
  * Synchronized IOC failure processing routines
  */
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index cfd59023227b..6bdd25a93db9 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -66,11 +66,11 @@
 #define BD_SPLIT_SIZE			32768
 
 /* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */
 #define BNX2I_SQ_WQES_MIN		16
 #define BNX2I_570X_SQ_WQES_MAX		128
 #define BNX2I_5770X_SQ_WQES_MAX		512
 #define BNX2I_570X_SQ_WQES_DEFAULT	128
-#define BNX2I_5770X_SQ_WQES_DEFAULT	256
+#define BNX2I_5770X_SQ_WQES_DEFAULT	128
 
 #define BNX2I_570X_CQ_WQES_MAX		128
 #define BNX2I_5770X_CQ_WQES_MAX		512
@@ -115,6 +115,7 @@
 #define BNX2X_MAX_CQS			8
 
 #define CNIC_ARM_CQE			1
+#define CNIC_ARM_CQE_FP			2
 #define CNIC_DISARM_CQE			0
 
 #define REG_RD(__hba, offset)				\
@@ -666,7 +667,9 @@ enum {
  *                 after HBA reset is completed by bnx2i/cnic/bnx2
  *                 modules
  * @state:         tracks offload connection state machine
- * @teardown_mode: indicates if conn teardown is abortive or orderly
+ * @timestamp:     tracks the start time when the ep begins to connect
+ * @num_active_cmds: tracks the number of outstanding commands for this ep
+ * @ec_shift:      the amount of shift as part of the event coal calc
  * @qp:            QP information
  * @ids:           contains chip allocated *context id* & driver assigned
  *                 *iscsi cid*
@@ -685,6 +688,7 @@ struct bnx2i_endpoint {
 	u32 state;
 	unsigned long timestamp;
 	int num_active_cmds;
+	u32 ec_shift;
 
 	struct qp_info qp;
 	struct ep_handles ids;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f0b89513faed..5c54a2d9b834 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -138,7 +138,6 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
 	u16 next_index;
 	u32 num_active_cmds;
 
-
 	/* Coalesce CQ entries only on 10G devices */
 	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
 		return;
@@ -148,16 +147,19 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
 	 * interrupts and other unwanted results
 	 */
 	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
-	if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
-		return;
 
-	if (action == CNIC_ARM_CQE) {
+	if (action != CNIC_ARM_CQE_FP)
+		if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+			return;
+
+	if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
 		num_active_cmds = ep->num_active_cmds;
 		if (num_active_cmds <= event_coal_min)
 			next_index = 1;
 		else
 			next_index = event_coal_min +
-				(num_active_cmds - event_coal_min) / event_coal_div;
+				((num_active_cmds - event_coal_min) >>
+				 ep->ec_shift);
 		if (!next_index)
 			next_index = 1;
 		cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -1274,6 +1276,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
 	iscsi_init.dummy_buffer_addr_hi =
 		(u32) ((u64) hba->dummy_buf_dma >> 32);
 
+	hba->num_ccell = hba->max_sqes >> 1;
 	hba->ctx_ccell_tasks =
 			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
 	iscsi_init.num_ccells_per_conn = hba->num_ccell;
@@ -1934,7 +1937,6 @@ cqe_out:
 			qp->cq_cons_idx++;
 		}
 	}
-	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
 }
 
 /**
@@ -1948,22 +1950,23 @@ cqe_out:
 static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
 					struct iscsi_kcqe *new_cqe_kcqe)
 {
-	struct bnx2i_conn *conn;
+	struct bnx2i_conn *bnx2i_conn;
 	u32 iscsi_cid;
 
 	iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
-	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
 
-	if (!conn) {
+	if (!bnx2i_conn) {
 		printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
 		return;
 	}
-	if (!conn->ep) {
+	if (!bnx2i_conn->ep) {
 		printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
 		return;
 	}
-
-	bnx2i_process_new_cqes(conn);
+	bnx2i_process_new_cqes(bnx2i_conn);
+	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
+	bnx2i_process_new_cqes(bnx2i_conn);
 }
 
 
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1d24a2819736..6adbdc34a9a5 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
 	wait_event_interruptible_timeout(hba->eh_wait,
 					 (list_empty(&hba->ep_ofld_list) &&
 					  list_empty(&hba->ep_destroy_list)),
-					 10 * HZ);
+					 2 * HZ);
 	/* Wait for all endpoints to be torn down, Chip will be reset once
 	 *  control returns to network driver. So it is required to cleanup and
 	 *  release all connection resources before returning from this routine.
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1809f9ccc4ce..041928b23cb0 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -379,6 +379,7 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
 {
 	struct iscsi_endpoint *ep;
 	struct bnx2i_endpoint *bnx2i_ep;
+	u32 ec_div;
 
 	ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
 	if (!ep) {
@@ -393,6 +394,11 @@ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
 	bnx2i_ep->ep_iscsi_cid = (u16) -1;
 	bnx2i_ep->hba = hba;
 	bnx2i_ep->hba_age = hba->age;
+
+	ec_div = event_coal_div;
+	while (ec_div >>= 1)
+		bnx2i_ep->ec_shift += 1;
+
 	hba->ofld_conns_active++;
 	init_waitqueue_head(&bnx2i_ep->ofld_wait);
 	return ep;
@@ -858,7 +864,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
 	mutex_init(&hba->net_dev_lock);
 	init_waitqueue_head(&hba->eh_wait);
 	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
-		hba->hba_shutdown_tmo = 20 * HZ;
+		hba->hba_shutdown_tmo = 30 * HZ;
 		hba->conn_teardown_tmo = 20 * HZ;
 		hba->conn_ctx_destroy_tmo = 6 * HZ;
 	} else {	/* 5706/5708/5709 */
@@ -1208,6 +1214,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
 	struct bnx2i_cmd *cmd = task->dd_data;
 	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
 
+	if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
+		return -ENOMEM;
+
 	/*
 	 * If there is no scsi_cmnd this must be a mgmt task
 	 */
@@ -2156,7 +2165,7 @@ static struct scsi_host_template bnx2i_host_template = {
 	.change_queue_depth	= iscsi_change_queue_depth,
 	.can_queue		= 1024,
 	.max_sectors		= 127,
-	.cmd_per_lun		= 32,
+	.cmd_per_lun		= 24,
 	.this_id		= -1,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cc23bd9480b2..155d7b9bdeae 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -137,6 +137,7 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
 static int fcoe_vport_disable(struct fc_vport *, bool disable);
 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static int fcoe_validate_vport_create(struct fc_vport *);
 
 static struct libfc_function_template fcoe_libfc_fcn_templ = {
 	.frame_send = fcoe_xmit,
@@ -2351,6 +2352,17 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
 	struct fcoe_interface *fcoe = port->priv;
 	struct net_device *netdev = fcoe->netdev;
 	struct fc_lport *vn_port;
+	int rc;
+	char buf[32];
+
+	rc = fcoe_validate_vport_create(vport);
+	if (rc) {
+		wwn_to_str(vport->port_name, buf, sizeof(buf));
+		printk(KERN_ERR "fcoe: Failed to create vport, "
+			"WWPN (0x%s) already exists\n",
+			buf);
+		return rc;
+	}
 
 	mutex_lock(&fcoe_config_mutex);
 	vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
@@ -2497,3 +2509,49 @@ static void fcoe_set_port_id(struct fc_lport *lport,
 	if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
 		fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
 }
+
+/**
+ * fcoe_validate_vport_create() - Validate a vport before creating it
+ * @vport: NPIV port to be created
+ *
+ * This routine is meant to add validation for a vport before creating it
+ * via fcoe_vport_create().
+ * Current validations are:
+ *      - WWPN supplied is unique for given lport
+ *
+ *
+*/
+static int fcoe_validate_vport_create(struct fc_vport *vport)
+{
+	struct Scsi_Host *shost = vport_to_shost(vport);
+	struct fc_lport *n_port = shost_priv(shost);
+	struct fc_lport *vn_port;
+	int rc = 0;
+	char buf[32];
+
+	mutex_lock(&n_port->lp_mutex);
+
+	wwn_to_str(vport->port_name, buf, sizeof(buf));
+	/* Check if the wwpn is not same as that of the lport */
+	if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+		FCOE_DBG("vport WWPN 0x%s is same as that of the "
+			"base port WWPN\n", buf);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Check if there is any existing vport with same wwpn */
+	list_for_each_entry(vn_port, &n_port->vports, list) {
+		if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+			FCOE_DBG("vport with given WWPN 0x%s already "
+				"exists\n", buf);
+			rc = -EINVAL;
+			break;
+		}
+	}
+
+out:
+	mutex_unlock(&n_port->lp_mutex);
+
+	return rc;
+}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 408a6fd78fb4..c4a93993c0cf 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -99,4 +99,14 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
 			((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
 }
 
+static inline void wwn_to_str(u64 wwn, char *buf, int len)
+{
+	u8 wwpn[8];
+
+	u64_to_wwn(wwn, wwpn);
+	snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
+		 wwpn[0], wwpn[1], wwpn[2], wwpn[3],
+		 wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
+}
+
 #endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 229e4af5508a..c74c4b8e71ef 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1173,7 +1173,9 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 	struct fc_lport *lport = fip->lp;
 	struct fc_lport *vn_port = NULL;
 	u32 desc_mask;
-	int is_vn_port = 0;
+	int num_vlink_desc;
+	int reset_phys_port = 0;
+	struct fip_vn_desc **vlink_desc_arr = NULL;
 
 	LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
 
@@ -1183,70 +1185,73 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 	/*
 	 * mask of required descriptors. Validating each one clears its bit.
 	 */
-	desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID);
+	desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
 
 	rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
 	desc = (struct fip_desc *)(fh + 1);
+
+	/*
+	 * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen'
+	 * before determining max Vx_Port descriptor but a buggy FCF could have
+	 * omited either or both MAC Address and Name Identifier descriptors
+	 */
+	num_vlink_desc = rlen / sizeof(*vp);
+	if (num_vlink_desc)
+		vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
+					 GFP_ATOMIC);
+	if (!vlink_desc_arr)
+		return;
+	num_vlink_desc = 0;
+
 	while (rlen >= sizeof(*desc)) {
 		dlen = desc->fip_dlen * FIP_BPW;
 		if (dlen > rlen)
-			return;
+			goto err;
 		/* Drop CVL if there are duplicate critical descriptors */
 		if ((desc->fip_dtype < 32) &&
+		    (desc->fip_dtype != FIP_DT_VN_ID) &&
 		    !(desc_mask & 1U << desc->fip_dtype)) {
 			LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
 					"Descriptors in FIP CVL\n");
-			return;
+			goto err;
 		}
 		switch (desc->fip_dtype) {
 		case FIP_DT_MAC:
 			mp = (struct fip_mac_desc *)desc;
 			if (dlen < sizeof(*mp))
-				return;
+				goto err;
 			if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
-				return;
+				goto err;
 			desc_mask &= ~BIT(FIP_DT_MAC);
 			break;
 		case FIP_DT_NAME:
 			wp = (struct fip_wwn_desc *)desc;
 			if (dlen < sizeof(*wp))
-				return;
+				goto err;
 			if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
-				return;
+				goto err;
 			desc_mask &= ~BIT(FIP_DT_NAME);
 			break;
 		case FIP_DT_VN_ID:
 			vp = (struct fip_vn_desc *)desc;
 			if (dlen < sizeof(*vp))
-				return;
-			if (compare_ether_addr(vp->fd_mac,
-					       fip->get_src_addr(lport)) == 0 &&
-			    get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
-			    ntoh24(vp->fd_fc_id) == lport->port_id) {
-				desc_mask &= ~BIT(FIP_DT_VN_ID);
-				break;
+				goto err;
+			vlink_desc_arr[num_vlink_desc++] = vp;
+			vn_port = fc_vport_id_lookup(lport,
+						     ntoh24(vp->fd_fc_id));
+			if (vn_port && (vn_port == lport)) {
+				mutex_lock(&fip->ctlr_mutex);
+				per_cpu_ptr(lport->dev_stats,
+					    get_cpu())->VLinkFailureCount++;
+				put_cpu();
+				fcoe_ctlr_reset(fip);
+				mutex_unlock(&fip->ctlr_mutex);
 			}
-			/* check if clr_vlink is for NPIV port */
-			mutex_lock(&lport->lp_mutex);
-			list_for_each_entry(vn_port, &lport->vports, list) {
-				if (compare_ether_addr(vp->fd_mac,
-				    fip->get_src_addr(vn_port)) == 0 &&
-				    (get_unaligned_be64(&vp->fd_wwpn)
-							== vn_port->wwpn) &&
-				    (ntoh24(vp->fd_fc_id) ==
-					fc_host_port_id(vn_port->host))) {
-					desc_mask &= ~BIT(FIP_DT_VN_ID);
-					is_vn_port = 1;
-					break;
-				}
-			}
-			mutex_unlock(&lport->lp_mutex);
-
 			break;
 		default:
 			/* standard says ignore unknown descriptors >= 128 */
 			if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
-				return;
+				goto err;
 			break;
 		}
 		desc = (struct fip_desc *)((char *)desc + dlen);
@@ -1256,26 +1261,68 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 	/*
 	 * reset only if all required descriptors were present and valid.
 	 */
-	if (desc_mask) {
+	if (desc_mask)
 		LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
 				desc_mask);
+	else if (!num_vlink_desc) {
+		LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n");
+		/*
+		 * No Vx_Port description. Clear all NPIV ports,
+		 * followed by physical port
+		 */
+		mutex_lock(&lport->lp_mutex);
+		list_for_each_entry(vn_port, &lport->vports, list)
+			fc_lport_reset(vn_port);
+		mutex_unlock(&lport->lp_mutex);
+
+		mutex_lock(&fip->ctlr_mutex);
+		per_cpu_ptr(lport->dev_stats,
+			    get_cpu())->VLinkFailureCount++;
+		put_cpu();
+		fcoe_ctlr_reset(fip);
+		mutex_unlock(&fip->ctlr_mutex);
+
+		fc_lport_reset(fip->lp);
+		fcoe_ctlr_solicit(fip, NULL);
 	} else {
-		LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+		int i;
 
-		if (is_vn_port)
-			fc_lport_reset(vn_port);
-		else {
-			mutex_lock(&fip->ctlr_mutex);
-			per_cpu_ptr(lport->dev_stats,
-				    get_cpu())->VLinkFailureCount++;
-			put_cpu();
-			fcoe_ctlr_reset(fip);
-			mutex_unlock(&fip->ctlr_mutex);
+		LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
+		for (i = 0; i < num_vlink_desc; i++) {
+			vp = vlink_desc_arr[i];
+			vn_port = fc_vport_id_lookup(lport,
+						     ntoh24(vp->fd_fc_id));
+			if (!vn_port)
+				continue;
+
+			/*
+			 * 'port_id' is already validated, check MAC address and
+			 * wwpn
+			 */
+			if (compare_ether_addr(fip->get_src_addr(vn_port),
+						vp->fd_mac) != 0 ||
+				get_unaligned_be64(&vp->fd_wwpn) !=
+							vn_port->wwpn)
+				continue;
+
+			if (vn_port == lport)
+				/*
+				 * Physical port, defer processing till all
+				 * listed NPIV ports are cleared
+				 */
+				reset_phys_port = 1;
+			else    /* NPIV port */
+				fc_lport_reset(vn_port);
+		}
 
+		if (reset_phys_port) {
 			fc_lport_reset(fip->lp);
 			fcoe_ctlr_solicit(fip, NULL);
 		}
 	}
+
+err:
+	kfree(vlink_desc_arr);
 }
 
 /**
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index f81f77c8569e..41068e8748e7 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -544,16 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
 	struct fcoe_transport *ft = NULL;
 	enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
 
-#ifdef CONFIG_LIBFCOE_MODULE
-	/*
-	 * Make sure the module has been initialized, and is not about to be
-	 * removed.  Module parameter sysfs files are writable before the
-	 * module_init function is called and after module_exit.
-	 */
-	if (THIS_MODULE->state != MODULE_STATE_LIVE)
-		goto out_nodev;
-#endif
-
 	mutex_lock(&ft_mutex);
 
 	netdev = fcoe_if_to_netdev(buffer);
@@ -618,16 +608,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
 	struct net_device *netdev = NULL;
 	struct fcoe_transport *ft = NULL;
 
-#ifdef CONFIG_LIBFCOE_MODULE
-	/*
-	 * Make sure the module has been initialized, and is not about to be
-	 * removed.  Module parameter sysfs files are writable before the
-	 * module_init function is called and after module_exit.
-	 */
-	if (THIS_MODULE->state != MODULE_STATE_LIVE)
-		goto out_nodev;
-#endif
-
 	mutex_lock(&ft_mutex);
 
 	netdev = fcoe_if_to_netdev(buffer);
@@ -672,16 +652,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
 	struct net_device *netdev = NULL;
 	struct fcoe_transport *ft = NULL;
 
-#ifdef CONFIG_LIBFCOE_MODULE
-	/*
-	 * Make sure the module has been initialized, and is not about to be
-	 * removed.  Module parameter sysfs files are writable before the
-	 * module_init function is called and after module_exit.
-	 */
-	if (THIS_MODULE->state != MODULE_STATE_LIVE)
-		goto out_nodev;
-#endif
-
 	mutex_lock(&ft_mutex);
 
 	netdev = fcoe_if_to_netdev(buffer);
@@ -720,16 +690,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
 	struct net_device *netdev = NULL;
 	struct fcoe_transport *ft = NULL;
 
-#ifdef CONFIG_LIBFCOE_MODULE
-	/*
-	 * Make sure the module has been initialized, and is not about to be
-	 * removed.  Module parameter sysfs files are writable before the
-	 * module_init function is called and after module_exit.
-	 */
-	if (THIS_MODULE->state != MODULE_STATE_LIVE)
-		goto out_nodev;
-#endif
-
 	mutex_lock(&ft_mutex);
 
 	netdev = fcoe_if_to_netdev(buffer);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 12868ca46110..888086c4e709 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5149,21 +5149,21 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
 		if (ipr_cmd != NULL) {
 			/* Clear the PCI interrupt */
+			num_hrrq = 0;
 			do {
 				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
 					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
 
-			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
-				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
-				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-				return IRQ_HANDLED;
-			}
-
 		} else if (rc == IRQ_NONE && irq_none == 0) {
 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
 			irq_none++;
+		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
+			   int_reg & IPR_PCII_HRRQ_UPDATED) {
+			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+			return IRQ_HANDLED;
 		} else
 			break;
 	}
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 911b2736cafa..b9cb8140b398 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -205,6 +205,7 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
 	default:
 		FC_DISC_DBG(disc, "Received an unsupported request, "
 			    "the opcode is (%x)\n", op);
+		fc_frame_free(fp);
 		break;
 	}
 }
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 77035a746f60..3b8a6451ea28 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1434,6 +1434,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
 	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
 		spin_lock_bh(&ep->ex_lock);
+		resp = ep->resp;
 		rc = fc_exch_done_locked(ep);
 		WARN_ON(fc_seq_exch(sp) != ep);
 		spin_unlock_bh(&ep->ex_lock);
@@ -1978,6 +1979,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
 	spin_unlock_bh(&ep->ex_lock);
 	return sp;
 err:
+	fc_fcp_ddp_done(fr_fsp(fp));
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2a3a4720a771..9cd2149519ac 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -312,7 +312,7 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
  * DDP related resources for a fcp_pkt
  * @fsp: The FCP packet that DDP had been used on
  */
-static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
 {
 	struct fc_lport *lport;
 
@@ -681,8 +681,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 		error = lport->tt.seq_send(lport, seq, fp);
 		if (error) {
 			WARN_ON(1);		/* send error should be rare */
-			fc_fcp_retry_cmd(fsp);
-			return 0;
+			return error;
 		}
 		fp = NULL;
 	}
@@ -1673,7 +1672,8 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 		       FC_FCTL_REQ, 0);
 
 	rec_tov = get_fsp_rec_tov(fsp);
-	seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
+	seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
+				      fc_fcp_pkt_destroy,
 				      fsp, jiffies_to_msecs(rec_tov));
 	if (!seq)
 		goto retry;
@@ -1720,7 +1720,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		return;
 	}
 
-	fsp->recov_seq = NULL;
 	switch (fc_frame_payload_op(fp)) {
 	case ELS_LS_ACC:
 		fsp->recov_retry = 0;
@@ -1732,10 +1731,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		break;
 	}
 	fc_fcp_unlock_pkt(fsp);
-	fsp->lp->tt.exch_done(seq);
 out:
+	fsp->lp->tt.exch_done(seq);
 	fc_frame_free(fp);
-	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
 }
 
 /**
@@ -1747,8 +1745,6 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
 	if (fc_fcp_lock_pkt(fsp))
 		goto out;
-	fsp->lp->tt.exch_done(fsp->recov_seq);
-	fsp->recov_seq = NULL;
 	switch (PTR_ERR(fp)) {
 	case -FC_EX_TIMEOUT:
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
@@ -1764,7 +1760,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 	}
 	fc_fcp_unlock_pkt(fsp);
 out:
-	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
+	fsp->lp->tt.exch_done(fsp->recov_seq);
 }
 
 /**
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index fedc819d70c0..c7d071289af5 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -108,6 +108,7 @@ extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
  * Set up direct-data placement for this I/O request
  */
 void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
 
 /*
  * Module setup functions
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 31fc21f4d831..db9238f2ecb8 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -99,19 +99,29 @@ static void sas_ata_task_done(struct sas_task *task)
 	struct sas_ha_struct *sas_ha;
 	enum ata_completion_errors ac;
 	unsigned long flags;
+	struct ata_link *link;
 
 	if (!qc)
 		goto qc_already_gone;
 
 	dev = qc->ap->private_data;
 	sas_ha = dev->port->ha;
+	link = &dev->sata_dev.ap->link;
 
 	spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
 	if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
 	    ((stat->stat == SAM_STAT_CHECK_CONDITION &&
 	      dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
 		ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
-		qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+
+		if (!link->sactive) {
+			qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+		} else {
+			link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+			if (unlikely(link->eh_info.err_mask))
+				qc->flags |= ATA_QCFLAG_FAILED;
+		}
+
 		dev->sata_dev.sstatus = resp->sstatus;
 		dev->sata_dev.serror = resp->serror;
 		dev->sata_dev.scontrol = resp->scontrol;
@@ -121,7 +131,13 @@ static void sas_ata_task_done(struct sas_task *task)
 			SAS_DPRINTK("%s: SAS error %x\n", __func__,
 				    stat->stat);
 			/* We saw a SAS error. Send a vague error. */
-			qc->err_mask = ac;
+			if (!link->sactive) {
+				qc->err_mask = ac;
+			} else {
+				link->eh_info.err_mask |= AC_ERR_DEV;
+				qc->flags |= ATA_QCFLAG_FAILED;
+			}
+
 			dev->sata_dev.tf.feature = 0x04;	/* status err */
 			dev->sata_dev.tf.command = ATA_ERR;
 		}
@@ -279,6 +295,44 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
 	return ret;
 }
 
+static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct domain_device *dev = ap->private_data;
+	struct sas_internal *i =
+		to_sas_internal(dev->port->ha->core.shost->transportt);
+	int res = TMF_RESP_FUNC_FAILED;
+	int ret = 0;
+
+	if (i->dft->lldd_ata_soft_reset)
+		res = i->dft->lldd_ata_soft_reset(dev);
+
+	if (res != TMF_RESP_FUNC_COMPLETE) {
+		SAS_DPRINTK("%s: Unable to soft reset\n", __func__);
+		ret = -EAGAIN;
+	}
+
+	switch (dev->sata_dev.command_set) {
+	case ATA_COMMAND_SET:
+		SAS_DPRINTK("%s: Found ATA device.\n", __func__);
+		*class = ATA_DEV_ATA;
+		break;
+	case ATAPI_COMMAND_SET:
+		SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
+		*class = ATA_DEV_ATAPI;
+		break;
+	default:
+		SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
+			    __func__, dev->sata_dev.command_set);
+		*class = ATA_DEV_UNKNOWN;
+		break;
+	}
+
+	ap->cbl = ATA_CBL_SATA;
+	return ret;
+}
+
 static void sas_ata_post_internal(struct ata_queued_cmd *qc)
 {
 	if (qc->flags & ATA_QCFLAG_FAILED)
@@ -309,7 +363,7 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
 
 static struct ata_port_operations sas_sata_ops = {
 	.prereset		= ata_std_prereset,
-	.softreset		= NULL,
+	.softreset		= sas_ata_soft_reset,
 	.hardreset		= sas_ata_hard_reset,
 	.postreset		= ata_std_postreset,
 	.error_handler		= ata_std_error_handler,
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 8b538bd1ff2b..14e21b5fb8ba 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -57,7 +57,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha);
 int sas_init_events(struct sas_ha_struct *sas_ha);
 void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
 
-void sas_deform_port(struct asd_sas_phy *phy);
+void sas_deform_port(struct asd_sas_phy *phy, int gone);
 
 void sas_porte_bytes_dmaed(struct work_struct *work);
 void sas_porte_broadcast_rcvd(struct work_struct *work);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index b459c4b635b1..e0f5018e9071 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -39,7 +39,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
 	sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
 			&phy->phy_events_pending);
 	phy->error = 0;
-	sas_deform_port(phy);
+	sas_deform_port(phy, 1);
 }
 
 static void sas_phye_oob_done(struct work_struct *work)
@@ -66,7 +66,7 @@ static void sas_phye_oob_error(struct work_struct *work)
 	sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
 			&phy->phy_events_pending);
 
-	sas_deform_port(phy);
+	sas_deform_port(phy, 1);
 
 	if (!port && phy->enabled && i->dft->lldd_control_phy) {
 		phy->error++;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 5257fdfe699a..42fd1f25b664 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -57,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
 
 	if (port) {
 		if (!phy_is_wideport_member(port, phy))
-			sas_deform_port(phy);
+			sas_deform_port(phy, 0);
 		else {
 			SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
 				    __func__, phy->id, phy->port->id,
@@ -153,28 +153,31 @@ static void sas_form_port(struct asd_sas_phy *phy)
  * This is called when the physical link to the other phy has been
  * lost (on this phy), in Event thread context. We cannot delay here.
  */
-void sas_deform_port(struct asd_sas_phy *phy)
+void sas_deform_port(struct asd_sas_phy *phy, int gone)
 {
 	struct sas_ha_struct *sas_ha = phy->ha;
 	struct asd_sas_port *port = phy->port;
 	struct sas_internal *si =
 		to_sas_internal(sas_ha->core.shost->transportt);
+	struct domain_device *dev;
 	unsigned long flags;
 
 	if (!port)
 		return;		  /* done by a phy event */
 
-	if (port->port_dev)
-		port->port_dev->pathways--;
+	dev = port->port_dev;
+	if (dev)
+		dev->pathways--;
 
 	if (port->num_phys == 1) {
+		if (dev && gone)
+			dev->gone = 1;
 		sas_unregister_domain_devices(port);
 		sas_port_delete(port->port);
 		port->port = NULL;
 	} else
 		sas_port_delete_phy(port->port, phy->phy);
 
-
 	if (si->dft->lldd_port_deformed)
 		si->dft->lldd_port_deformed(phy);
 
@@ -244,7 +247,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
 	sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
 			&phy->port_events_pending);
 
-	sas_deform_port(phy);
+	sas_deform_port(phy, 1);
 }
 
 void sas_porte_timer_event(struct work_struct *work)
@@ -256,7 +259,7 @@ void sas_porte_timer_event(struct work_struct *work)
 	sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
 			&phy->port_events_pending);
 
-	sas_deform_port(phy);
+	sas_deform_port(phy, 1);
 }
 
 void sas_porte_hard_reset(struct work_struct *work)
@@ -268,7 +271,7 @@ void sas_porte_hard_reset(struct work_struct *work)
 	sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
 			&phy->port_events_pending);
 
-	sas_deform_port(phy);
+	sas_deform_port(phy, 1);
 }
 
 /* ---------- SAS port registration ---------- */
@@ -306,6 +309,6 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
 
 	for (i = 0; i < sas_ha->num_phys; i++)
 		if (sas_ha->sas_phy[i]->port)
-			sas_deform_port(sas_ha->sas_phy[i]);
+			sas_deform_port(sas_ha->sas_phy[i], 0);
 
 }
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f6e189f40917..eeba76cdf774 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -207,6 +207,13 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
 	struct sas_ha_struct *sas_ha = dev->port->ha;
 	struct sas_task *task;
 
+	/* If the device fell off, no sense in issuing commands */
+	if (dev->gone) {
+		cmd->result = DID_BAD_TARGET << 16;
+		scsi_done(cmd);
+		goto out;
+	}
+
 	if (dev_is_sata(dev)) {
 		unsigned long flags;
 
@@ -216,13 +223,6 @@ static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
 		goto out;
 	}
 
-	/* If the device fell off, no sense in issuing commands */
-	if (dev->gone) {
-		cmd->result = DID_BAD_TARGET << 16;
-		scsi_done(cmd);
-		goto out;
-	}
-
 	res = -ENOMEM;
 	task = sas_create_task(cmd, dev, GFP_ATOMIC);
 	if (!task)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 02d53d89534f..8ec2c86a49d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -41,6 +41,7 @@ struct lpfc_sli2_slim;
 					   downloads using bsg */
 #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
+#define LPFC_MAX_SGE_SIZE	0x80000000 /* Maximum data allowed in a SGE */
 #define LPFC_MAX_PROT_SG_SEG_CNT 4096	/* prot sg element count per scsi cmd*/
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120	/* lun q_depth ramp up interval */
@@ -486,6 +487,42 @@ struct unsol_rcv_ct_ctx {
 					(1 << LPFC_USER_LINK_SPEED_AUTO))
 #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
 
+enum nemb_type {
+	nemb_mse = 1,
+	nemb_hbd
+};
+
+enum mbox_type {
+	mbox_rd = 1,
+	mbox_wr
+};
+
+enum dma_type {
+	dma_mbox = 1,
+	dma_ebuf
+};
+
+enum sta_type {
+	sta_pre_addr = 1,
+	sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+	uint32_t state;
+#define LPFC_BSG_MBOX_IDLE	0
+#define LPFC_BSG_MBOX_HOST	1
+#define LPFC_BSG_MBOX_PORT	2
+#define LPFC_BSG_MBOX_DONE	3
+#define LPFC_BSG_MBOX_ABTS	4
+	enum nemb_type nembType;
+	enum mbox_type mboxType;
+	uint32_t numBuf;
+	uint32_t mbxTag;
+	uint32_t seqNum;
+	struct lpfc_dmabuf *mbx_dmabuf;
+	struct list_head ext_dmabuf_list;
+};
+
 struct lpfc_hba {
 	/* SCSI interface function jump table entries */
 	int (*lpfc_new_scsi_buf)
@@ -589,6 +626,7 @@ struct lpfc_hba {
 
 	MAILBOX_t *mbox;
 	uint32_t *mbox_ext;
+	struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
 	uint32_t ha_copy;
 	struct _PCB *pcb;
 	struct _IOCB *IOCBs;
@@ -659,6 +697,7 @@ struct lpfc_hba {
 	uint32_t cfg_hostmem_hgp;
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_aer_support;
+	uint32_t cfg_sriov_nr_virtfn;
 	uint32_t cfg_iocb_cnt;
 	uint32_t cfg_suppress_link_up;
 #define LPFC_INITIALIZE_LINK	0	/* do normal init_link mbox */
@@ -706,7 +745,6 @@ struct lpfc_hba {
 	uint32_t *hbq_get;	/* Host mem address of HBQ get ptrs */
 
 	int brd_no;			/* FC board number */
-
 	char SerialNumber[32];		/* adapter Serial Number */
 	char OptionROMVersion[32];	/* adapter BIOS / Fcode version */
 	char ModelDesc[256];		/* Model Description */
@@ -778,6 +816,9 @@ struct lpfc_hba {
 	uint16_t vpi_base;
 	uint16_t vfi_base;
 	unsigned long *vpi_bmask;	/* vpi allocation table */
+	uint16_t *vpi_ids;
+	uint16_t vpi_count;
+	struct list_head lpfc_vpi_blk_list;
 
 	/* Data structure used by fabric iocb scheduler */
 	struct list_head fabric_iocb_list;
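The new lpfc_mbox_ext_buf_ctx above carries a multi-buffer mailbox pass-through session through the LPFC_BSG_MBOX_* states. A toy C sketch of one plausible walk through those states; the transition order is an assumption for illustration, not taken from the driver:

#include <stdio.h>

/* Toy walk through session states modeled on LPFC_BSG_MBOX_*; the
 * transitions shown are illustrative, not the driver's exact rules. */
enum bsg_mbox_state { IDLE, HOST, PORT, DONE, ABTS };

static const char *state_name[] = { "IDLE", "HOST", "PORT", "DONE", "ABTS" };

int main(void)
{
	enum bsg_mbox_state path[] = { HOST, PORT, DONE, IDLE };
	unsigned int i;

	for (i = 0; i < sizeof(path) / sizeof(path[0]); i++)
		printf("session -> %s\n", state_name[path[i]]);
	return 0;
}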
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8dcbf8fff673..135a53baa735 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,73 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
 }
 
 /**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Request SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+	struct completion online_compl;
+	uint32_t reg_val;
+	int status = 0;
+	int rc;
+
+	if (!phba->cfg_enable_hba_reset)
+		return -EIO;
+
+	if ((phba->sli_rev < LPFC_SLI_REV4) ||
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	     LPFC_SLI_INTF_IF_TYPE_2))
+		return -EPERM;
+
+	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+	if (status != 0)
+		return status;
+
+	/* wait for the device to be quiesced before firmware reset */
+	msleep(100);
+
+	reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+			LPFC_CTL_PDEV_CTL_OFFSET);
+
+	if (opcode == LPFC_FW_DUMP)
+		reg_val |= LPFC_FW_DUMP_REQUEST;
+	else if (opcode == LPFC_FW_RESET)
+		reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+	else if (opcode == LPFC_DV_RESET)
+		reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+	writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+	       LPFC_CTL_PDEV_CTL_OFFSET);
+	/* flush */
+	readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+	/* delay driver action following IF_TYPE_2 reset */
+	msleep(100);
+
+	init_completion(&online_compl);
+	rc = lpfc_workq_post_event(phba, &status, &online_compl,
+				   LPFC_EVT_ONLINE);
+	if (rc == 0)
+		return -ENOMEM;
+
+	wait_for_completion(&online_compl);
+
+	if (status != 0)
+		return -EIO;
+
+	return 0;
+}
+
+/**
  * lpfc_nport_evt_cnt_show - Return the number of nport events
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
@@ -848,6 +915,12 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 	else
 		status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+	else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+	else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
 	else
 		return -EINVAL;
 
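The board_mode handler above now also accepts the keywords dump, fw_reset and dv_reset. A hedged user-space usage sketch; the sysfs path and host number are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Usage sketch: trigger the new "fw_reset" action by writing the
 * keyword to the board_mode sysfs attribute. host0 is an assumption. */
int main(void)
{
	const char *path = "/sys/class/scsi_host/host0/board_mode";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "fw_reset", strlen("fw_reset")) < 0)
		perror("write");
	close(fd);
	return 0;
}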
@@ -1322,6 +1395,102 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
 }
 
 /**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support; 0 will be returned if called on a virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct pci_dev *pdev = phba->pcidev;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
+	struct lpfc_rsrc_desc_pcie *desc;
+	uint32_t max_nr_virtfn;
+	uint32_t desc_count;
+	int length, rc, i;
+
+	if ((phba->sli_rev < LPFC_SLI_REV4) ||
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	     LPFC_SLI_INTF_IF_TYPE_2))
+		return -EPERM;
+
+	if (!pdev->is_physfn)
+		return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+
+	/* get the maximum number of virtfn supported by the physfn */
+	length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
+			 length, LPFC_SLI4_MBX_EMBED);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+	bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
+	       phba->sli4_hba.iov.pf_number + 1);
+
+	get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
+	bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
+	       LPFC_CFG_TYPE_CURRENT_ACTIVE);
+
+	rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+				      lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+
+	if (rc != MBX_TIMEOUT) {
+		/* check return status */
+		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+					 &shdr->response);
+		if (shdr_status || shdr_add_status || rc)
+			goto error_out;
+
+	} else
+		goto error_out;
+
+	desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
+
+	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+		desc = (struct lpfc_rsrc_desc_pcie *)
+			&get_prof_cfg->u.response.prof_cfg.desc[i];
+		if (LPFC_RSRC_DESC_TYPE_PCIE ==
+		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+			max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
+					       desc);
+			break;
+		}
+	}
+
+	if (i < LPFC_RSRC_DESC_MAX_NUM) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mboxq, phba->mbox_mem_pool);
+		return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+	}
+
+error_out:
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	return -EIO;
+}
+
+/**
  * lpfc_param_show - Return a cfg attribute value in decimal
  *
  * Description:
@@ -1762,6 +1931,8 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
 static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
 static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
 static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+		   lpfc_sriov_hw_max_virtfn_show, NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 
@@ -3014,7 +3185,7 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
  *
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing enable or disable aer flag.
  * @count: unused variable.
  *
  * Description:
@@ -3098,7 +3269,7 @@ lpfc_param_show(aer_support)
 /**
  * lpfc_aer_support_init - Set the initial adapters aer support flag
  * @phba: lpfc_hba pointer.
- * @val: link speed value.
+ * @val: enable aer or disable aer flag.
  *
  * Description:
  * If val is in a valid range [0,1], then set the adapter's initial
@@ -3137,7 +3308,7 @@ static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
  * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
- * @buf: containing the string "selective".
+ * @buf: containing flag 1 for aer cleanup state.
  * @count: unused variable.
  *
  * Description:
@@ -3180,6 +3351,136 @@ lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
 		   lpfc_aer_cleanup_state);
 
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string containing the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this api is called through user sysfs, the driver shall
+ * try to enable or disable SR-IOV virtual functions according to the
+ * following:
+ *
+ * If zero virtual functions have been enabled to the physical function,
+ * the driver shall invoke the pci enable virtual function api trying
+ * to enable the virtual functions. If the nr_vfn provided is greater
+ * than the maximum supported, the maximum virtual function number will
+ * be used for invoking the api; otherwise, the nr_vfn provided shall
+ * be used for invoking the api. If the api call returned success, the
+ * actual number of virtual functions enabled will be set to the driver
+ * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
+ * cfg_sriov_nr_virtfn remains zero.
+ *
+ * If non-zero virtual functions have already been enabled to the
+ * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
+ * -EINVAL will be returned and the driver does nothing;
+ *
+ * If the nr_vfn provided is zero and non-zero virtual functions have
+ * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * disabling virtual function api shall be invoked to disable all the
+ * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
+ * zero. Otherwise, if zero virtual functions have been enabled, do
+ * nothing.
+ *
+ * Returns:
+ * length of the buf on success, if val is in range and the intended mode
+ * is supported.
+ * -EINVAL if val is out of range or the intended mode is not supported.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct pci_dev *pdev = phba->pcidev;
+	int val = 0, rc = -EINVAL;
+
+	/* Sanity check on user data */
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+	if (val < 0)
+		return -EINVAL;
+
+	/* Request disabling virtual functions */
+	if (val == 0) {
+		if (phba->cfg_sriov_nr_virtfn > 0) {
+			pci_disable_sriov(pdev);
+			phba->cfg_sriov_nr_virtfn = 0;
+		}
+		return strlen(buf);
+	}
+
+	/* Request enabling virtual functions */
+	if (phba->cfg_sriov_nr_virtfn > 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3018 There are %d virtual functions "
+				"enabled on physical function.\n",
+				phba->cfg_sriov_nr_virtfn);
+		return -EEXIST;
+	}
+
+	if (val <= LPFC_MAX_VFN_PER_PFN)
+		phba->cfg_sriov_nr_virtfn = val;
+	else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3019 Enabling %d virtual functions is not "
+				"allowed.\n", val);
+		return -EINVAL;
+	}
+
+	rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+	if (rc) {
+		phba->cfg_sriov_nr_virtfn = 0;
+		rc = -EPERM;
+	} else
+		rc = strlen(buf);
+
+	return rc;
+}
+
+static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
+module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+lpfc_param_show(sriov_nr_virtfn)
+
+/**
+ * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: number of sr-iov virtual functions to be enabled.
+ *
+ * Description:
+ * If val is in a valid range [0,255], then set the adapter's initial
+ * cfg_sriov_nr_virtfn field; otherwise an error is logged and -EINVAL is
+ * returned. It will be up to the driver's probe_one routine to determine
+ * whether the device's SR-IOV is supported or not.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
+{
+	if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
+		phba->cfg_sriov_nr_virtfn = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3017 Enabling %d virtual functions is not "
+			"allowed.\n", val);
+	return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+		   lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+
 /*
 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
@@ -3497,6 +3798,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_prot_sg_seg_cnt,
 	&dev_attr_lpfc_aer_support,
 	&dev_attr_lpfc_aer_state_cleanup,
+	&dev_attr_lpfc_sriov_nr_virtfn,
 	&dev_attr_lpfc_suppress_link_up,
 	&dev_attr_lpfc_iocb_cnt,
 	&dev_attr_iocb_hw,
@@ -3505,6 +3807,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_fips_level,
 	&dev_attr_lpfc_fips_rev,
 	&dev_attr_lpfc_dss,
+	&dev_attr_lpfc_sriov_hw_max_virtfn,
 	NULL,
 };
 
@@ -3961,7 +4264,7 @@ static struct bin_attribute sysfs_mbox_attr = {
 		.name = "mbox",
 		.mode = S_IRUSR | S_IWUSR,
 	},
-	.size = MAILBOX_CMD_SIZE,
+	.size = MAILBOX_SYSFS_MAX,
 	.read = sysfs_mbox_read,
 	.write = sysfs_mbox_write,
 };
@@ -4705,6 +5008,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
 	lpfc_aer_support_init(phba, lpfc_aer_support);
+	lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
 	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
 	lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
 	phba->cfg_enable_dss = 1;
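lpfc_sriov_nr_virtfn_init() is wired into lpfc_get_cfgparam() so the module parameter is validated once at probe time. A self-contained C sketch of that accept-or-reject pattern; the ceiling value is an assumption standing in for LPFC_MAX_VFN_PER_PFN:

#include <stdio.h>

#define MAX_VFN_PER_PFN 255	/* assumed ceiling for illustration */

/* Sketch of the init-time validation pattern: accept an in-range
 * module-parameter value, reject anything else with -EINVAL. */
static int sriov_nr_virtfn_init(int *cfg, int val)
{
	if (val >= 0 && val <= MAX_VFN_PER_PFN) {
		*cfg = val;
		return 0;
	}
	return -22;	/* like -EINVAL */
}

int main(void)
{
	int cfg = 0;

	printf("init(4)   -> %d, cfg=%d\n", sriov_nr_virtfn_init(&cfg, 4), cfg);
	printf("init(999) -> %d, cfg=%d\n", sriov_nr_virtfn_init(&cfg, 999), cfg);
	return 0;
}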
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 853e5042f39c..7fb0ba4cbfa7 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -23,6 +23,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/list.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -79,8 +80,7 @@ struct lpfc_bsg_iocb {
 struct lpfc_bsg_mbox {
 	LPFC_MBOXQ_t *pmboxq;
 	MAILBOX_t *mb;
-	struct lpfc_dmabuf *rxbmp; /* for BIU diags */
-	struct lpfc_dmabufext *dmp; /* for BIU diags */
+	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
 	uint8_t *ext; /* extended mailbox data */
 	uint32_t mbOffset; /* from app */
 	uint32_t inExtWLen; /* from app */
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	cmd->ulpLe = 1;
 	cmd->ulpClass = CLASS3;
 	cmd->ulpContext = ndlp->nlp_rpi;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
 	cmd->ulpOwner = OWN_CHIP;
 	cmdiocbq->vport = phba->pport;
 	cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 		}
 
 		icmd->un.ulpWord[3] = ndlp->nlp_rpi;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			icmd->ulpContext =
+				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
 		/* The exchange is done, mark the entry as invalid */
 		phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
 	} else
@@ -1463,11 +1469,91 @@ send_mgmt_rsp_exit:
 }
 
 /**
- * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
+ * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
+ * @phba: Pointer to HBA context object.
  * @job: LPFC_BSG_VENDOR_DIAG_MODE
  *
- * This function is responsible for placing a port into diagnostic loopback
- * mode in order to perform a diagnostic loopback test.
+ * This function is responsible for preparing driver for diag loopback
+ * on device.
+ */
+static int
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host *shost;
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+	int i = 0;
+
+	psli = &phba->sli;
+	if (!psli)
+		return -ENODEV;
+
+	pring = &psli->ring[LPFC_FCP_RING];
+	if (!pring)
+		return -ENODEV;
+
+	if ((phba->link_state == LPFC_HBA_ERROR) ||
+	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+		return -EACCES;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports) {
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			scsi_block_requests(shost);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	} else {
+		shost = lpfc_shost_from_vport(phba->pport);
+		scsi_block_requests(shost);
+	}
+
+	while (pring->txcmplq_cnt) {
+		if (i++ > 500)	/* wait up to 5 seconds */
+			break;
+		msleep(10);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for driver exit processing of setting up
+ * diag loopback mode on device.
+ */
+static void
+lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports) {
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			scsi_unblock_requests(shost);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	} else {
+		shost = lpfc_shost_from_vport(phba->pport);
+		scsi_unblock_requests(shost);
+	}
+	return;
+}
+
+/**
+ * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli3 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
  * All new scsi requests are blocked, a small delay is used to allow the
- * scsi requests to complete then the link is brought down. If the link is
- * is placed in loopback mode then scsi requests are again allowed
+ * scsi requests to complete, then the link is brought down. If the link
+ * is placed in loopback mode then scsi requests are again allowed
@@ -1475,17 +1561,11 @@ send_mgmt_rsp_exit:
  * All of this is done in-line.
  */
 static int
-lpfc_bsg_diag_mode(struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 {
-	struct Scsi_Host *shost = job->shost;
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
-	struct lpfc_hba *phba = vport->phba;
 	struct diag_mode_set *loopback_mode;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
 	uint32_t link_flags;
 	uint32_t timeout;
-	struct lpfc_vport **vports;
 	LPFC_MBOXQ_t *pmboxq;
 	int mbxstatus;
 	int i = 0;
@@ -1494,53 +1574,33 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
 	/* no data to return just the return code */
 	job->reply->reply_payload_rcv_len = 0;
 
-	if (job->request_len <
-	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct diag_mode_set)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
-				"2738 Received DIAG MODE request below minimum "
-				"size\n");
+				"2738 Received DIAG MODE request size:%d "
+				"below the minimum size:%d\n",
+				job->request_len,
+				(int)(sizeof(struct fc_bsg_request) +
+				      sizeof(struct diag_mode_set)));
 		rc = -EINVAL;
 		goto job_error;
 	}
 
+	rc = lpfc_bsg_diag_mode_enter(phba, job);
+	if (rc)
+		goto job_error;
+
+	/* bring the link to diagnostic mode */
 	loopback_mode = (struct diag_mode_set *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
 	link_flags = loopback_mode->type;
 	timeout = loopback_mode->timeout * 100;
 
-	if ((phba->link_state == LPFC_HBA_ERROR) ||
-	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
-	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
-		rc = -EACCES;
-		goto job_error;
-	}
-
 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmboxq) {
 		rc = -ENOMEM;
-		goto job_error;
+		goto loopback_mode_exit;
 	}
-
-	vports = lpfc_create_vport_work_array(phba);
-	if (vports) {
-		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
-			shost = lpfc_shost_from_vport(vports[i]);
-			scsi_block_requests(shost);
-		}
-
-		lpfc_destroy_vport_work_array(phba, vports);
-	} else {
-		shost = lpfc_shost_from_vport(phba->pport);
-		scsi_block_requests(shost);
-	}
-
-	while (pring->txcmplq_cnt) {
-		if (i++ > 500)	/* wait up to 5 seconds */
-			break;
-
-		msleep(10);
-	}
-
 	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
 	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
 	pmboxq->u.mb.mbxOwner = OWN_HOST;
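The rewritten sli3 path above funnels every setup failure through loopback_mode_exit so the lpfc_bsg_diag_mode_enter()/_exit() pair always balances. A compact user-space C sketch of that enter/exit discipline, with hypothetical helper names:

#include <stdio.h>

/* Every path that calls the (hypothetical) enter helper must reach the
 * matching exit so blocked requests are unblocked again, even on error. */
static int diag_mode_enter(void)
{
	printf("block requests, drain ring\n");
	return 0;
}

static void diag_mode_exit(void)
{
	printf("unblock requests\n");
}

static int do_loopback(int fail)
{
	int rc = diag_mode_enter();

	if (rc)
		return rc;
	if (fail) {
		rc = -1;	/* simulated mailbox failure */
		goto out;
	}
	printf("loopback configured\n");
out:
	diag_mode_exit();	/* runs on success and failure alike */
	return rc;
}

int main(void)
{
	do_loopback(0);
	do_loopback(1);
	return 0;
}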
@@ -1594,22 +1654,186 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
 		rc = -ENODEV;
 
 loopback_mode_exit:
-	vports = lpfc_create_vport_work_array(phba);
-	if (vports) {
-		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
-			shost = lpfc_shost_from_vport(vports[i]);
-			scsi_unblock_requests(shost);
-		}
-		lpfc_destroy_vport_work_array(phba, vports);
-	} else {
-		shost = lpfc_shost_from_vport(phba->pport);
-		scsi_unblock_requests(shost);
-	}
+	lpfc_bsg_diag_mode_exit(phba);
+
+	/*
+	 * Let SLI layer release mboxq if mbox command completed after timeout.
+	 */
+	if (mbxstatus != MBX_TIMEOUT)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		job->job_done(job);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
+ * @phba: Pointer to HBA context object.
+ * @diag: Flag to set link to diag or normal operation state.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * link to either diag state or normal operation state.
+ */
+static int
+lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
+{
+	LPFC_MBOXQ_t *pmboxq;
+	struct lpfc_mbx_set_link_diag_state *link_diag_state;
+	uint32_t req_len, alloc_len;
+	int mbxstatus = MBX_SUCCESS, rc;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return -ENOMEM;
+
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+				     req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len) {
+		rc = -ENOMEM;
+		goto link_diag_state_set_out;
+	}
+	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
+	       phba->sli4_hba.link_state.number);
+	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
+	       phba->sli4_hba.link_state.type);
+	if (diag)
+		bf_set(lpfc_mbx_set_diag_state_diag,
+		       &link_diag_state->u.req, 1);
+	else
+		bf_set(lpfc_mbx_set_diag_state_diag,
+		       &link_diag_state->u.req, 0);
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
+		rc = 0;
+	else
+		rc = -ENODEV;
+
+link_diag_state_set_out:
+	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli4 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ */
+static int
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+	struct diag_mode_set *loopback_mode;
+	uint32_t link_flags, timeout, req_len, alloc_len;
+	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+	LPFC_MBOXQ_t *pmboxq = NULL;
+	int mbxstatus, i, rc = 0;
+
+	/* no data to return just the return code */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct diag_mode_set)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3011 Received DIAG MODE request size:%d "
+				"below the minimum size:%d\n",
+				job->request_len,
+				(int)(sizeof(struct fc_bsg_request) +
+				      sizeof(struct diag_mode_set)));
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	rc = lpfc_bsg_diag_mode_enter(phba, job);
+	if (rc)
+		goto job_error;
+
+	/* bring the link to diagnostic mode */
+	loopback_mode = (struct diag_mode_set *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+	link_flags = loopback_mode->type;
+	timeout = loopback_mode->timeout * 100;
+
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+	if (rc)
+		goto loopback_mode_exit;
+
+	/* wait for link down before proceeding */
+	i = 0;
+	while (phba->link_state != LPFC_LINK_DOWN) {
+		if (i++ > timeout) {
+			rc = -ETIMEDOUT;
+			goto loopback_mode_exit;
+		}
+		msleep(10);
+	}
+	/* set up loopback mode */
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		rc = -ENOMEM;
+		goto loopback_mode_exit;
+	}
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+				     req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len) {
+		rc = -ENOMEM;
+		goto loopback_mode_exit;
+	}
+	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+	bf_set(lpfc_mbx_set_diag_state_link_num,
+	       &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
+	bf_set(lpfc_mbx_set_diag_state_link_type,
+	       &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
+	if (link_flags == INTERNAL_LOOP_BACK)
+		bf_set(lpfc_mbx_set_diag_lpbk_type,
+		       &link_diag_loopback->u.req,
+		       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+	else
+		bf_set(lpfc_mbx_set_diag_lpbk_type,
+		       &link_diag_loopback->u.req,
+		       LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+		rc = -ENODEV;
+	else {
+		phba->link_flag |= LS_LOOPBACK_MODE;
+		/* wait for the link attention interrupt */
+		msleep(100);
+		i = 0;
+		while (phba->link_state != LPFC_HBA_READY) {
+			if (i++ > timeout) {
+				rc = -ETIMEDOUT;
+				break;
+			}
+			msleep(10);
+		}
+	}
 
+loopback_mode_exit:
+	lpfc_bsg_diag_mode_exit(phba);
+
 	/*
 	 * Let SLI layer release mboxq if mbox command completed after timeout.
 	 */
-	if (mbxstatus != MBX_TIMEOUT)
+	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
 		mempool_free(pmboxq, phba->mbox_mem_pool);
 
 job_error:
@@ -1622,6 +1846,234 @@ job_error:
 }
 
 /**
+ * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routines.
+ */
+static int
+lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	int rc;
+
+	shost = job->shost;
+	if (!shost)
+		return -ENODEV;
+	vport = (struct lpfc_vport *)job->shost->hostdata;
+	if (!vport)
+		return -ENODEV;
+	phba = vport->phba;
+	if (!phba)
+		return -ENODEV;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
+	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+		 LPFC_SLI_INTF_IF_TYPE_2)
+		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
+	else
+		rc = -ENODEV;
+
+	return rc;
+
+}
+
+/**
+ * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routines.
+ */
+static int
+lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	int rc;
+
+	shost = job->shost;
+	if (!shost)
+		return -ENODEV;
+	vport = (struct lpfc_vport *)job->shost->hostdata;
+	if (!vport)
+		return -ENODEV;
+	phba = vport->phba;
+	if (!phba)
+		return -ENODEV;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return -ENODEV;
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_IF_TYPE_2)
+		return -ENODEV;
+
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+	if (!rc)
+		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
+ * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
+ *
+ * This function performs an SLI4 diag link test request from the user
+ * application.
+ */
+static int
+lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	LPFC_MBOXQ_t *pmboxq;
+	struct sli4_link_diag *link_diag_test_cmd;
+	uint32_t req_len, alloc_len;
+	uint32_t timeout;
+	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+	struct diag_status *diag_status_reply;
+	int mbxstatus, rc = 0;
+
+	shost = job->shost;
+	if (!shost) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+	vport = (struct lpfc_vport *)job->shost->hostdata;
+	if (!vport) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+	phba = vport->phba;
+	if (!phba) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_IF_TYPE_2) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct sli4_link_diag)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3013 Received LINK DIAG TEST request "
+				" size:%d below the minimum size:%d\n",
+				job->request_len,
+				(int)(sizeof(struct fc_bsg_request) +
+				      sizeof(struct sli4_link_diag)));
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	rc = lpfc_bsg_diag_mode_enter(phba, job);
+	if (rc)
+		goto job_error;
+
+	link_diag_test_cmd = (struct sli4_link_diag *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+	timeout = link_diag_test_cmd->timeout * 100;
+
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+
+	if (rc)
+		goto job_error;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		rc = -ENOMEM;
+		goto link_diag_test_exit;
+	}
+
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+				     req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len) {
+		rc = -ENOMEM;
+		goto link_diag_test_exit;
+	}
+	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
+	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
+	       phba->sli4_hba.link_state.number);
+	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
+	       phba->sli4_hba.link_state.type);
+	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->test_id);
+	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->loops);
+	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->test_version);
+	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->error_action);
+
+	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || mbxstatus) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3010 Run link diag test mailbox failed with "
+				"mbx_status x%x status x%x, add_status x%x\n",
+				mbxstatus, shdr_status, shdr_add_status);
+	}
+
+	diag_status_reply = (struct diag_status *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3012 Received Run link diag test reply "
+				"below minimum size (%d): reply_len:%d\n",
+				(int)(sizeof(struct fc_bsg_request) +
+				      sizeof(struct diag_status)),
+				job->reply_len);
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	diag_status_reply->mbox_status = mbxstatus;
+	diag_status_reply->shdr_status = shdr_status;
+	diag_status_reply->shdr_add_status = shdr_add_status;
+
+link_diag_test_exit:
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		job->job_done(job);
+	return rc;
+}
+
+/**
  * lpfcdiag_loop_self_reg - obtains a remote port login id
  * @phba: Pointer to HBA context object
  * @rpi: Pointer to a remote port login id
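The new lpfc_bsg_diag_loopback_mode() entry point fans one BSG vendor command out to the SLI-3 or SLI-4 implementation based on sli_rev and interface type. A minimal C sketch of that dispatch shape, with illustrative constants:

#include <stdio.h>

enum { SLI_REV3 = 3, SLI_REV4 = 4 };

/* One entry point, two revision-specific paths; constants illustrative. */
static int diag_loopback_mode(int sli_rev, int if_type2)
{
	if (sli_rev < SLI_REV4) {
		printf("sli3 loopback path\n");
		return 0;
	}
	if (if_type2) {
		printf("sli4 if-type-2 loopback path\n");
		return 0;
	}
	return -19;	/* like -ENODEV: unsupported interface */
}

int main(void)
{
	diag_loopback_mode(SLI_REV3, 0);
	diag_loopback_mode(SLI_REV4, 1);
	printf("%d\n", diag_loopback_mode(SLI_REV4, 0));
	return 0;
}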
@@ -1851,6 +2303,86 @@ err_get_xri_exit:
 }
 
 /**
+ * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object
+ *
+ * This function allocates a BSG_MBOX_SIZE (4KB) page size dma buffer and
+ * returns the pointer to the buffer.
+ **/
+static struct lpfc_dmabuf *
+lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_dmabuf *dmabuf;
+	struct pci_dev *pcidev = phba->pcidev;
+
+	/* allocate dma buffer struct */
+	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return NULL;
+
+	INIT_LIST_HEAD(&dmabuf->list);
+
+	/* now, allocate dma buffer */
+	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+					  &(dmabuf->phys), GFP_KERNEL);
+
+	if (!dmabuf->virt) {
+		kfree(dmabuf);
+		return NULL;
+	}
+	memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
+
+	return dmabuf;
+}
+
+/**
+ * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
+ *
+ * This routine simply frees a dma buffer and its associated buffer
+ * descriptor referred by @dmabuf.
+ **/
+static void
+lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
+{
+	struct pci_dev *pcidev = phba->pcidev;
+
+	if (!dmabuf)
+		return;
+
+	if (dmabuf->virt)
+		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+				  dmabuf->virt, dmabuf->phys);
+	kfree(dmabuf);
+	return;
+}
+
+/**
+ * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object.
+ * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
+ *
+ * This routine simply frees all dma buffers and their associated buffer
+ * descriptors referred by @dmabuf_list.
+ **/
+static void
+lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
+			    struct list_head *dmabuf_list)
+{
+	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+
+	if (list_empty(dmabuf_list))
+		return;
+
+	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
+		list_del_init(&dmabuf->list);
+		lpfc_bsg_dma_page_free(phba, dmabuf);
+	}
+	return;
+}
+
+/**
  * diag_cmd_data_alloc - fills in a bde struct with dma buffers
  * @phba: Pointer to HBA context object
  * @bpl: Pointer to 64 bit bde structure
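The three helpers above standardize on one BSG_MBOX_SIZE dma page per descriptor, freed descriptor-and-buffer together. A user-space C analogue using plain heap memory in place of dma_alloc_coherent(); names mirror the driver's but the code is only a sketch:

#include <stdio.h>
#include <stdlib.h>

#define BSG_MBOX_SIZE 4096

/* A descriptor owning a fixed-size buffer, freed together with it,
 * mirroring lpfc_bsg_dma_page_alloc()/_free() in shape only. */
struct dmabuf { void *virt; };

static struct dmabuf *page_alloc(void)
{
	struct dmabuf *d = malloc(sizeof(*d));

	if (!d)
		return NULL;
	d->virt = calloc(1, BSG_MBOX_SIZE);	/* zeroed, like the driver */
	if (!d->virt) {
		free(d);			/* undo partial allocation */
		return NULL;
	}
	return d;
}

static void page_free(struct dmabuf *d)
{
	if (!d)
		return;
	free(d->virt);
	free(d);
}

int main(void)
{
	struct dmabuf *d = page_alloc();

	printf("alloc %s\n", d ? "ok" : "failed");
	page_free(d);
	return 0;
}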
@@ -2067,7 +2599,7 @@ err_post_rxbufs_exit:
 }
 
 /**
- * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
+ * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
  *
  * This function receives a user data buffer to be transmitted and received on
@@ -2086,7 +2618,7 @@ err_post_rxbufs_exit:
  * of loopback mode.
  **/
 static int
-lpfc_bsg_diag_test(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
@@ -2411,7 +2943,7 @@ job_error:
 }
 
 /**
- * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
+ * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
  * @phba: Pointer to HBA context object.
  * @pmboxq: Pointer to mailbox command.
  *
@@ -2422,15 +2954,13 @@ job_error:
  * of the mailbox.
  **/
 void
-lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
 	struct bsg_job_data *dd_data;
 	struct fc_bsg_job *job;
-	struct lpfc_mbx_nembed_cmd *nembed_sge;
 	uint32_t size;
 	unsigned long flags;
-	uint8_t *to;
-	uint8_t *from;
+	uint8_t *pmb, *pmb_buf;
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = pmboxq->context1;
@@ -2440,62 +2970,21 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 		return;
 	}
 
-	/* build the outgoing buffer to do an sg copy
-	 * the format is the response mailbox followed by any extended
-	 * mailbox data
+	/*
+	 * The outgoing buffer is readily referred from the dma buffer,
+	 * just need to get header part from mailboxq structure.
 	 */
-	from = (uint8_t *)&pmboxq->u.mb;
-	to = (uint8_t *)dd_data->context_un.mbox.mb;
-	memcpy(to, from, sizeof(MAILBOX_t));
-	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
-		/* copy the extended data if any, count is in words */
-		if (dd_data->context_un.mbox.outExtWLen) {
-			from = (uint8_t *)dd_data->context_un.mbox.ext;
-			to += sizeof(MAILBOX_t);
-			size = dd_data->context_un.mbox.outExtWLen *
-					sizeof(uint32_t);
-			memcpy(to, from, size);
-		} else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
-			from = (uint8_t *)dd_data->context_un.mbox.
-						dmp->dma.virt;
-			to += sizeof(MAILBOX_t);
-			size = dd_data->context_un.mbox.dmp->size;
-			memcpy(to, from, size);
-		} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
-			(pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
-			from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
-						virt;
-			to += sizeof(MAILBOX_t);
-			size = pmboxq->u.mb.un.varWords[5];
-			memcpy(to, from, size);
-		} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
-			(pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
-			nembed_sge = (struct lpfc_mbx_nembed_cmd *)
-				&pmboxq->u.mb.un.varWords[0];
-
-			from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
-						virt;
-			to += sizeof(MAILBOX_t);
-			size = nembed_sge->sge[0].length;
-			memcpy(to, from, size);
-		} else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
-			from = (uint8_t *)dd_data->context_un.
-						mbox.dmp->dma.virt;
-			to += sizeof(MAILBOX_t);
-			size = dd_data->context_un.mbox.dmp->size;
-			memcpy(to, from, size);
-		}
-	}
+	pmb = (uint8_t *)&pmboxq->u.mb;
+	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
 
-	from = (uint8_t *)dd_data->context_un.mbox.mb;
 	job = dd_data->context_un.mbox.set_job;
 	if (job) {
 		size = job->reply_payload.payload_len;
 		job->reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(job->reply_payload.sg_list,
 					    job->reply_payload.sg_cnt,
-					    from, size);
-		job->reply->result = 0;
+					    pmb_buf, size);
 		/* need to hold the lock until we set job->dd_data to NULL
 		 * to hold off the timeout handler returning to the mid-layer
 		 * while we are still processing the job.
@@ -2503,28 +2992,19 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 		job->dd_data = NULL;
 		dd_data->context_un.mbox.set_job = NULL;
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		job->job_done(job);
 	} else {
 		dd_data->context_un.mbox.set_job = NULL;
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 	}
 
-	kfree(dd_data->context_un.mbox.mb);
 	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
-	kfree(dd_data->context_un.mbox.ext);
-	if (dd_data->context_un.mbox.dmp) {
-		dma_free_coherent(&phba->pcidev->dev,
-			dd_data->context_un.mbox.dmp->size,
-			dd_data->context_un.mbox.dmp->dma.virt,
-			dd_data->context_un.mbox.dmp->dma.phys);
-		kfree(dd_data->context_un.mbox.dmp);
-	}
-	if (dd_data->context_un.mbox.rxbmp) {
-		lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
-			dd_data->context_un.mbox.rxbmp->phys);
-		kfree(dd_data->context_un.mbox.rxbmp);
-	}
+	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
 	kfree(dd_data);
+
+	if (job) {
+		job->reply->result = 0;
+		job->job_done(job);
+	}
 	return;
 }
 
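The slimmed-down completion handler copies back only the fixed MAILBOX_t header; extended data now already sits in the dma buffer, so the old per-command copy branches disappear. A tiny C sketch of that header-only copy, under the assumption of an 8-word mailbox header:

#include <stdio.h>
#include <string.h>

/* Sketch: only the fixed-size mailbox header is copied back into the
 * staging buffer; any extended payload is assumed to live in a dma
 * buffer already visible to the consumer, so no extra copies happen. */
struct mailbox { unsigned int words[8]; };

int main(void)
{
	struct mailbox hw = { .words = { 0x12, 0x34 } };
	unsigned char staging[4096];
	struct mailbox out;

	memcpy(staging, &hw, sizeof(hw));	/* header part only */
	memcpy(&out, staging, sizeof(out));
	printf("first word: 0x%x\n", out.words[0]);
	return 0;
}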
@@ -2619,6 +3099,1006 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2619} 3099}
2620 3100
2621/** 3101/**
3102 * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
3103 * @phba: Pointer to HBA context object.
3104 *
3105 * This is routine clean up and reset BSG handling of multi-buffer mbox
3106 * command session.
3107 **/
3108static void
3109lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3110{
3111 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3112 return;
3113
3114 /* free all memory, including dma buffers */
3115 lpfc_bsg_dma_page_list_free(phba,
3116 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3117 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3118 /* multi-buffer write mailbox command pass-through complete */
3119 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3120 sizeof(struct lpfc_mbox_ext_buf_ctx));
3121 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3122
3123 return;
3124}
3125
3126/**
3127 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3128 * @phba: Pointer to HBA context object.
3129 * @pmboxq: Pointer to mailbox command.
3130 *
3131 * This is routine handles BSG job for mailbox commands completions with
3132 * multiple external buffers.
3133 **/
3134static struct fc_bsg_job *
3135lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3136{
3137 struct bsg_job_data *dd_data;
3138 struct fc_bsg_job *job;
3139 uint8_t *pmb, *pmb_buf;
3140 unsigned long flags;
3141 uint32_t size;
3142 int rc = 0;
3143
3144 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3145 dd_data = pmboxq->context1;
3146 /* has the job already timed out? */
3147 if (!dd_data) {
3148 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3149 job = NULL;
3150 goto job_done_out;
3151 }
3152
3153 /*
3154 * The outgoing buffer is readily referred from the dma buffer,
3155 * just need to get header part from mailboxq structure.
3156 */
3157 pmb = (uint8_t *)&pmboxq->u.mb;
3158 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3159 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3160
3161 job = dd_data->context_un.mbox.set_job;
3162 if (job) {
3163 size = job->reply_payload.payload_len;
3164 job->reply->reply_payload_rcv_len =
3165 sg_copy_from_buffer(job->reply_payload.sg_list,
3166 job->reply_payload.sg_cnt,
3167 pmb_buf, size);
3168 /* result for successful */
3169 job->reply->result = 0;
3170 job->dd_data = NULL;
3171 /* need to hold the lock util we set job->dd_data to NULL
3172 * to hold off the timeout handler from midlayer to take
3173 * any action.
3174 */
3175 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3176 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3177 "2937 SLI_CONFIG ext-buffer maibox command "
3178 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3179 phba->mbox_ext_buf_ctx.nembType,
3180 phba->mbox_ext_buf_ctx.mboxType, size);
3181 } else
3182 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3183
3184job_done_out:
3185 if (!job)
3186 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3187 "2938 SLI_CONFIG ext-buffer maibox "
3188 "command (x%x/x%x) failure, rc:x%x\n",
3189 phba->mbox_ext_buf_ctx.nembType,
3190 phba->mbox_ext_buf_ctx.mboxType, rc);
3191 /* state change */
3192 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3193 kfree(dd_data);
3194
3195 return job;
3196}
3197
3198/**
3199 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3200 * @phba: Pointer to HBA context object.
3201 * @pmboxq: Pointer to mailbox command.
3202 *
3203 * This is completion handler function for mailbox read commands with multiple
3204 * external buffers.
3205 **/
3206static void
3207lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3208{
3209 struct fc_bsg_job *job;
3210
3211 /* handle the BSG job with mailbox command */
3212 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3213 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3214
3215 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3216 "2939 SLI_CONFIG ext-buffer rd maibox command "
3217 "complete, ctxState:x%x, mbxStatus:x%x\n",
3218 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3219
3220 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3221
3222 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3223 lpfc_bsg_mbox_ext_session_reset(phba);
3224
3225 /* free base driver mailbox structure memory */
3226 mempool_free(pmboxq, phba->mbox_mem_pool);
3227
3228 /* complete the bsg job if we have it */
3229 if (job)
3230 job->job_done(job);
3231
3232 return;
3233}
3234
3235/**
3236 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3237 * @phba: Pointer to HBA context object.
3238 * @pmboxq: Pointer to mailbox command.
3239 *
3240 * This is the completion handler for mailbox write commands with multiple
3241 * external buffers.
3242 **/
3243static void
3244lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3245{
3246 struct fc_bsg_job *job;
3247
3248 /* handle the BSG job with the mailbox command */
3249 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3250 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3251
3252 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3253 "2940 SLI_CONFIG ext-buffer wr maibox command "
3254 "complete, ctxState:x%x, mbxStatus:x%x\n",
3255 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3256
3257 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3258
3259 /* free all memory, including dma buffers */
3260 mempool_free(pmboxq, phba->mbox_mem_pool);
3261 lpfc_bsg_mbox_ext_session_reset(phba);
3262
3263 /* complete the bsg job if we have it */
3264 if (job)
3265 job->job_done(job);
3266
3267 return;
3268}
3269
3270static void
3271lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3272 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3273 struct lpfc_dmabuf *ext_dmabuf)
3274{
3275 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3276
3277 /* pointer to the start of mailbox command */
3278 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3279
3280 if (nemb_tp == nemb_mse) {
3281 if (index == 0) {
3282 sli_cfg_mbx->un.sli_config_emb0_subsys.
3283 mse[index].pa_hi =
3284 putPaddrHigh(mbx_dmabuf->phys +
3285 sizeof(MAILBOX_t));
3286 sli_cfg_mbx->un.sli_config_emb0_subsys.
3287 mse[index].pa_lo =
3288 putPaddrLow(mbx_dmabuf->phys +
3289 sizeof(MAILBOX_t));
3290 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3291 "2943 SLI_CONFIG(mse)[%d], "
3292 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3293 index,
3294 sli_cfg_mbx->un.sli_config_emb0_subsys.
3295 mse[index].buf_len,
3296 sli_cfg_mbx->un.sli_config_emb0_subsys.
3297 mse[index].pa_hi,
3298 sli_cfg_mbx->un.sli_config_emb0_subsys.
3299 mse[index].pa_lo);
3300 } else {
3301 sli_cfg_mbx->un.sli_config_emb0_subsys.
3302 mse[index].pa_hi =
3303 putPaddrHigh(ext_dmabuf->phys);
3304 sli_cfg_mbx->un.sli_config_emb0_subsys.
3305 mse[index].pa_lo =
3306 putPaddrLow(ext_dmabuf->phys);
3307 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3308 "2944 SLI_CONFIG(mse)[%d], "
3309 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3310 index,
3311 sli_cfg_mbx->un.sli_config_emb0_subsys.
3312 mse[index].buf_len,
3313 sli_cfg_mbx->un.sli_config_emb0_subsys.
3314 mse[index].pa_hi,
3315 sli_cfg_mbx->un.sli_config_emb0_subsys.
3316 mse[index].pa_lo);
3317 }
3318 } else {
3319 if (index == 0) {
3320 sli_cfg_mbx->un.sli_config_emb1_subsys.
3321 hbd[index].pa_hi =
3322 putPaddrHigh(mbx_dmabuf->phys +
3323 sizeof(MAILBOX_t));
3324 sli_cfg_mbx->un.sli_config_emb1_subsys.
3325 hbd[index].pa_lo =
3326 putPaddrLow(mbx_dmabuf->phys +
3327 sizeof(MAILBOX_t));
3328 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3329 "3007 SLI_CONFIG(hbd)[%d], "
3330 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3331 index,
3332 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3333 &sli_cfg_mbx->un.
3334 sli_config_emb1_subsys.hbd[index]),
3335 sli_cfg_mbx->un.sli_config_emb1_subsys.
3336 hbd[index].pa_hi,
3337 sli_cfg_mbx->un.sli_config_emb1_subsys.
3338 hbd[index].pa_lo);
3339
3340 } else {
3341 sli_cfg_mbx->un.sli_config_emb1_subsys.
3342 hbd[index].pa_hi =
3343 putPaddrHigh(ext_dmabuf->phys);
3344 sli_cfg_mbx->un.sli_config_emb1_subsys.
3345 hbd[index].pa_lo =
3346 putPaddrLow(ext_dmabuf->phys);
3347 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3348 "3008 SLI_CONFIG(hbd)[%d], "
3349 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3350 index,
3351 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3352 &sli_cfg_mbx->un.
3353 sli_config_emb1_subsys.hbd[index]),
3354 sli_cfg_mbx->un.sli_config_emb1_subsys.
3355 hbd[index].pa_hi,
3356 sli_cfg_mbx->un.sli_config_emb1_subsys.
3357 hbd[index].pa_lo);
3358 }
3359 }
3360 return;
3361}
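
The descriptor setup above splits each 64-bit DMA address with the driver's putPaddrHigh()/putPaddrLow() helpers. A minimal stand-in pair is sketched below for readers following along without the lpfc headers; these macro bodies are illustrative assumptions, not the definitions from lpfc_hw.h.

#include <linux/types.h>

/* Illustrative stand-ins for the lpfc address-split helpers used above;
 * each half of a 64-bit DMA address must fit a 32-bit descriptor word.
 */
#define putPaddrHigh(addr) ((uint32_t)(((uint64_t)(addr)) >> 32))
#define putPaddrLow(addr)  ((uint32_t)((uint64_t)(addr)))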
3362
3363/**
3364 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3365 * @phba: Pointer to HBA context object.
3366 * @job: Pointer to the fc_bsg_job object.
3367 * @nemb_tp: Enumeration of the non-embedded mailbox command type.
3368 * @dmabuf: Pointer to a DMA buffer descriptor.
3369 *
3370 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3371 * non-embedded external buffers.
3372 **/
3373static int
3374lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3375 enum nemb_type nemb_tp,
3376 struct lpfc_dmabuf *dmabuf)
3377{
3378 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3379 struct dfc_mbox_req *mbox_req;
3380 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3381 uint32_t ext_buf_cnt, ext_buf_index;
3382 struct lpfc_dmabuf *ext_dmabuf = NULL;
3383 struct bsg_job_data *dd_data = NULL;
3384 LPFC_MBOXQ_t *pmboxq = NULL;
3385 MAILBOX_t *pmb;
3386 uint8_t *pmbx;
3387 int rc, i;
3388
3389 mbox_req =
3390 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3391
3392 /* pointer to the start of mailbox command */
3393 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3394
3395 if (nemb_tp == nemb_mse) {
3396 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3397 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3398 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3399 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3400 "2945 Handled SLI_CONFIG(mse) rd, "
3401 "ext_buf_cnt(%d) out of range(%d)\n",
3402 ext_buf_cnt,
3403 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3404 rc = -ERANGE;
3405 goto job_error;
3406 }
3407 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3408 "2941 Handled SLI_CONFIG(mse) rd, "
3409 "ext_buf_cnt:%d\n", ext_buf_cnt);
3410 } else {
3411 /* sanity check on interface type for support */
3412 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3413 LPFC_SLI_INTF_IF_TYPE_2) {
3414 rc = -ENODEV;
3415 goto job_error;
3416 }
3417 /* nemb_tp == nemb_hbd */
3418 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3419 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3420 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3421 "2946 Handled SLI_CONFIG(hbd) rd, "
3422 "ext_buf_cnt(%d) out of range(%d)\n",
3423 ext_buf_cnt,
3424 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3425 rc = -ERANGE;
3426 goto job_error;
3427 }
3428 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3429 "2942 Handled SLI_CONFIG(hbd) rd, "
3430 "ext_buf_cnt:%d\n", ext_buf_cnt);
3431 }
3432
3433 /* reject a non-embedded mailbox command with no external buffer */
3434 if (ext_buf_cnt == 0) {
3435 rc = -EPERM;
3436 goto job_error;
3437 } else if (ext_buf_cnt > 1) {
3438 /* additional external read buffers */
3439 for (i = 1; i < ext_buf_cnt; i++) {
3440 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3441 if (!ext_dmabuf) {
3442 rc = -ENOMEM;
3443 goto job_error;
3444 }
3445 list_add_tail(&ext_dmabuf->list,
3446 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3447 }
3448 }
3449
3450 /* bsg tracking structure */
3451 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3452 if (!dd_data) {
3453 rc = -ENOMEM;
3454 goto job_error;
3455 }
3456
3457 /* mailbox command structure for base driver */
3458 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3459 if (!pmboxq) {
3460 rc = -ENOMEM;
3461 goto job_error;
3462 }
3463 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3464
3465 /* for the first external buffer */
3466 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3467
3468 /* for the rest of external buffer descriptors if any */
3469 if (ext_buf_cnt > 1) {
3470 ext_buf_index = 1;
3471 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3472 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3473 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3474 ext_buf_index, dmabuf,
3475 curr_dmabuf);
3476 ext_buf_index++;
3477 }
3478 }
3479
3480 /* construct base driver mbox command */
3481 pmb = &pmboxq->u.mb;
3482 pmbx = (uint8_t *)dmabuf->virt;
3483 memcpy(pmb, pmbx, sizeof(*pmb));
3484 pmb->mbxOwner = OWN_HOST;
3485 pmboxq->vport = phba->pport;
3486
3487 /* multi-buffer handling context */
3488 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3489 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3490 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3491 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3492 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3493 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3494
3495 /* callback for multi-buffer read mailbox command */
3496 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3497
3498 /* context fields to callback function */
3499 pmboxq->context1 = dd_data;
3500 dd_data->type = TYPE_MBOX;
3501 dd_data->context_un.mbox.pmboxq = pmboxq;
3502 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3503 dd_data->context_un.mbox.set_job = job;
3504 job->dd_data = dd_data;
3505
3506 /* state change */
3507 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3508
3509 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3510 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3511 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3512 "2947 Issued SLI_CONFIG ext-buffer "
3513 "maibox command, rc:x%x\n", rc);
3514 return 1;
3515 }
3516 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3517 "2948 Failed to issue SLI_CONFIG ext-buffer "
3518 "maibox command, rc:x%x\n", rc);
3519 rc = -EPIPE;
3520
3521job_error:
3522 if (pmboxq)
3523 mempool_free(pmboxq, phba->mbox_mem_pool);
3524 lpfc_bsg_dma_page_list_free(phba,
3525 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3526 kfree(dd_data);
3527 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3528 return rc;
3529}
3530
3531/**
3532 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3533 * @phba: Pointer to HBA context object.
3534 * @job: Pointer to the fc_bsg_job object.
3535 * @dmabuf: Pointer to a DMA buffer descriptor.
3536 *
3537 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
3538 * non-embedded external buffers.
3539 **/
3540static int
3541lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3542 enum nemb_type nemb_tp,
3543 struct lpfc_dmabuf *dmabuf)
3544{
3545 struct dfc_mbox_req *mbox_req;
3546 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3547 uint32_t ext_buf_cnt;
3548 struct bsg_job_data *dd_data = NULL;
3549 LPFC_MBOXQ_t *pmboxq = NULL;
3550 MAILBOX_t *pmb;
3551 uint8_t *mbx;
3552 int rc = 0, i;
3553
3554 mbox_req =
3555 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3556
3557 /* pointer to the start of mailbox command */
3558 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3559
3560 if (nemb_tp == nemb_mse) {
3561 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3562 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3563 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3564 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3565 "2953 Handled SLI_CONFIG(mse) wr, "
3566 "ext_buf_cnt(%d) out of range(%d)\n",
3567 ext_buf_cnt,
3568 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3569 return -ERANGE;
3570 }
3571 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3572 "2949 Handled SLI_CONFIG(mse) wr, "
3573 "ext_buf_cnt:%d\n", ext_buf_cnt);
3574 } else {
3575 /* sanity check on interface type for support */
3576 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3577 LPFC_SLI_INTF_IF_TYPE_2)
3578 return -ENODEV;
3579 /* nemb_tp == nemb_hbd */
3580 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3581 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3582 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3583 "2954 Handled SLI_CONFIG(hbd) wr, "
3584 "ext_buf_cnt(%d) out of range(%d)\n",
3585 ext_buf_cnt,
3586 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3587 return -ERANGE;
3588 }
3589 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3590 "2950 Handled SLI_CONFIG(hbd) wr, "
3591 "ext_buf_cnt:%d\n", ext_buf_cnt);
3592 }
3593
3594 if (ext_buf_cnt == 0)
3595 return -EPERM;
3596
3597 /* for the first external buffer */
3598 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3599
3600 /* log the lengths of the remaining external buffers */
3601 for (i = 1; i < ext_buf_cnt; i++) {
3602 if (nemb_tp == nemb_mse)
3603 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3604 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
3605 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
3606 mse[i].buf_len);
3607 else
3608 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3609 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
3610 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3611 &sli_cfg_mbx->un.sli_config_emb1_subsys.
3612 hbd[i]));
3613 }
3614
3615 /* multi-buffer handling context */
3616 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3617 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
3618 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3619 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3620 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3621 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3622
3623 if (ext_buf_cnt == 1) {
3624 /* bsg tracking structure */
3625 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3626 if (!dd_data) {
3627 rc = -ENOMEM;
3628 goto job_error;
3629 }
3630
3631 /* mailbox command structure for base driver */
3632 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3633 if (!pmboxq) {
3634 rc = -ENOMEM;
3635 goto job_error;
3636 }
3637 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3638 pmb = &pmboxq->u.mb;
3639 mbx = (uint8_t *)dmabuf->virt;
3640 memcpy(pmb, mbx, sizeof(*pmb));
3641 pmb->mbxOwner = OWN_HOST;
3642 pmboxq->vport = phba->pport;
3643
3644 /* callback for multi-buffer write mailbox command */
3645 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3646
3647 /* context fields to callback function */
3648 pmboxq->context1 = dd_data;
3649 dd_data->type = TYPE_MBOX;
3650 dd_data->context_un.mbox.pmboxq = pmboxq;
3651 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3652 dd_data->context_un.mbox.set_job = job;
3653 job->dd_data = dd_data;
3654
3655 /* state change */
3656 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3657
3658 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3659 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3660 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3661 "2955 Issued SLI_CONFIG ext-buffer "
3662 "maibox command, rc:x%x\n", rc);
3663 return 1;
3664 }
3665 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3666 "2956 Failed to issue SLI_CONFIG ext-buffer "
3667 "maibox command, rc:x%x\n", rc);
3668 rc = -EPIPE;
3669 }
3670
3671job_error:
3672 if (pmboxq)
3673 mempool_free(pmboxq, phba->mbox_mem_pool);
3674 kfree(dd_data);
3675
3676 return rc;
3677}
3678
3679/**
3680 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
3681 * @phba: Pointer to HBA context object.
3682 * @job: Pointer to the fc_bsg_job object.
3683 * @dmabuf: Pointer to a DMA buffer descriptor.
3684 *
3685 * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
3686 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
3687 * with embedded subsystem 0x1 opcodes carrying external HBDs.
3688 **/
3689static int
3690lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3691 struct lpfc_dmabuf *dmabuf)
3692{
3693 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3694 uint32_t subsys;
3695 uint32_t opcode;
3696 int rc = SLI_CONFIG_NOT_HANDLED;
3697
3698 /* state change */
3699 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3700
3701 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3702
3703 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3704 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3705 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
3706 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3707 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
3708 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3709 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
3710 switch (opcode) {
3711 case FCOE_OPCODE_READ_FCF:
3712 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3713 "2957 Handled SLI_CONFIG "
3714 "subsys_fcoe, opcode:x%x\n",
3715 opcode);
3716 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3717 nemb_mse, dmabuf);
3718 break;
3719 case FCOE_OPCODE_ADD_FCF:
3720 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3721 "2958 Handled SLI_CONFIG "
3722 "subsys_fcoe, opcode:x%x\n",
3723 opcode);
3724 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3725 nemb_mse, dmabuf);
3726 break;
3727 default:
3728 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3729 "2959 Not handled SLI_CONFIG "
3730 "subsys_fcoe, opcode:x%x\n",
3731 opcode);
3732 rc = SLI_CONFIG_NOT_HANDLED;
3733 break;
3734 }
3735 } else {
3736 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3737 "2977 Handled SLI_CONFIG "
3738 "subsys:x%d, opcode:x%x\n",
3739 subsys, opcode);
3740 rc = SLI_CONFIG_NOT_HANDLED;
3741 }
3742 } else {
3743 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
3744 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3745 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
3746 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3747 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
3748 switch (opcode) {
3749 case COMN_OPCODE_READ_OBJECT:
3750 case COMN_OPCODE_READ_OBJECT_LIST:
3751 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3752 "2960 Handled SLI_CONFIG "
3753 "subsys_comn, opcode:x%x\n",
3754 opcode);
3755 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3756 nemb_hbd, dmabuf);
3757 break;
3758 case COMN_OPCODE_WRITE_OBJECT:
3759 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3760 "2961 Handled SLI_CONFIG "
3761 "subsys_comn, opcode:x%x\n",
3762 opcode);
3763 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3764 nemb_hbd, dmabuf);
3765 break;
3766 default:
3767 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3768 "2962 Not handled SLI_CONFIG "
3769 "subsys_comn, opcode:x%x\n",
3770 opcode);
3771 rc = SLI_CONFIG_NOT_HANDLED;
3772 break;
3773 }
3774 } else {
3775 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3776 "2978 Handled SLI_CONFIG "
3777 "subsys:x%d, opcode:x%x\n",
3778 subsys, opcode);
3779 rc = SLI_CONFIG_NOT_HANDLED;
3780 }
3781 }
3782 return rc;
3783}
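
The dispatch above decodes the subsystem and opcode with the bsg_bf_get() bit-field accessors. The hand-expanded equivalent below uses the SHIFT/MASK values declared in lpfc_bsg.h (opcode in bits 0-7, subsystem in bits 8-15 of the emb0 sub-command word) and is offered only as a reading aid, not as driver code.

/* Hand-expanded equivalent of the emb0 subsystem/opcode decode above,
 * using the shift/mask values from lpfc_bsg.h (illustrative only).
 */
static inline void sli_cfg_emb0_decode(uint32_t word64,
					uint32_t *subsys, uint32_t *opcode)
{
	*opcode = (word64 >> 0) & 0xff;	/* lpfc_emb0_subcmnd_opcode */
	*subsys = (word64 >> 8) & 0xff;	/* lpfc_emb0_subcmnd_subsys */
}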
3784
3785/**
3786 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
3787 * @phba: Pointer to HBA context object.
3788 *
3789 * This routine requests an abort of a pass-through mailbox command with
3790 * multiple external buffers due to error condition.
3791 **/
3792static void
3793lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
3794{
3795 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
3796 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
3797 else
3798 lpfc_bsg_mbox_ext_session_reset(phba);
3799 return;
3800}
3801
3802/**
3803 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
3804 * @phba: Pointer to HBA context object.
3805 * @job: Pointer to the fc_bsg_job object.
3806 *
3807 * This routine copies the next mailbox read external buffer back to
3808 * user space through BSG.
3809 **/
3810static int
3811lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
3812{
3813 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3814 struct lpfc_dmabuf *dmabuf;
3815 uint8_t *pbuf;
3816 uint32_t size;
3817 uint32_t index;
3818
3819 index = phba->mbox_ext_buf_ctx.seqNum;
3820 phba->mbox_ext_buf_ctx.seqNum++;
3821
3822 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3823 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3824
3825 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3826 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
3827 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
3828 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3829 "2963 SLI_CONFIG (mse) ext-buffer rd get "
3830 "buffer[%d], size:%d\n", index, size);
3831 } else {
3832 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3833 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
3834 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3835 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
3836 "buffer[%d], size:%d\n", index, size);
3837 }
3838 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
3839 return -EPIPE;
3840 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
3841 struct lpfc_dmabuf, list);
3842 list_del_init(&dmabuf->list);
3843 pbuf = (uint8_t *)dmabuf->virt;
3844 job->reply->reply_payload_rcv_len =
3845 sg_copy_from_buffer(job->reply_payload.sg_list,
3846 job->reply_payload.sg_cnt,
3847 pbuf, size);
3848
3849 lpfc_bsg_dma_page_free(phba, dmabuf);
3850
3851 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3852 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3853 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
3854 "command session done\n");
3855 lpfc_bsg_mbox_ext_session_reset(phba);
3856 }
3857
3858 job->reply->result = 0;
3859 job->job_done(job);
3860
3861 return SLI_CONFIG_HANDLED;
3862}
3863
3864/**
3865 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
3866 * @phba: Pointer to HBA context object.
3867 * @dmabuf: Pointer to a DMA buffer descriptor.
3868 *
3869 * This routine sets up the next mailbox write external buffer obtained
3870 * from user space through BSG.
3871 **/
3872static int
3873lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3874 struct lpfc_dmabuf *dmabuf)
3875{
3876 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3877 struct bsg_job_data *dd_data = NULL;
3878 LPFC_MBOXQ_t *pmboxq = NULL;
3879 MAILBOX_t *pmb;
3880 enum nemb_type nemb_tp;
3881 uint8_t *pbuf;
3882 uint32_t size;
3883 uint32_t index;
3884 int rc;
3885
3886 index = phba->mbox_ext_buf_ctx.seqNum;
3887 phba->mbox_ext_buf_ctx.seqNum++;
3888 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
3889
3890 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3891 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3892
3893 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3894 if (!dd_data) {
3895 rc = -ENOMEM;
3896 goto job_error;
3897 }
3898
3899 pbuf = (uint8_t *)dmabuf->virt;
3900 size = job->request_payload.payload_len;
3901 sg_copy_to_buffer(job->request_payload.sg_list,
3902 job->request_payload.sg_cnt,
3903 pbuf, size);
3904
3905 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3906 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3907 "2966 SLI_CONFIG (mse) ext-buffer wr set "
3908 "buffer[%d], size:%d\n",
3909 phba->mbox_ext_buf_ctx.seqNum, size);
3910
3911 } else {
3912 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3913 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
3914 "buffer[%d], size:%d\n",
3915 phba->mbox_ext_buf_ctx.seqNum, size);
3916
3917 }
3918
3919 /* set up external buffer descriptor and add to external buffer list */
3920 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
3921 phba->mbox_ext_buf_ctx.mbx_dmabuf,
3922 dmabuf);
3923 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3924
3925 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3926 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3927 "2968 SLI_CONFIG ext-buffer wr all %d "
3928 "ebuffers received\n",
3929 phba->mbox_ext_buf_ctx.numBuf);
3930 /* mailbox command structure for base driver */
3931 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3932 if (!pmboxq) {
3933 rc = -ENOMEM;
3934 goto job_error;
3935 }
3936 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3937 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3938 pmb = &pmboxq->u.mb;
3939 memcpy(pmb, pbuf, sizeof(*pmb));
3940 pmb->mbxOwner = OWN_HOST;
3941 pmboxq->vport = phba->pport;
3942
3943 /* callback for multi-buffer write mailbox command */
3944 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3945
3946 /* context fields to callback function */
3947 pmboxq->context1 = dd_data;
3948 dd_data->type = TYPE_MBOX;
3949 dd_data->context_un.mbox.pmboxq = pmboxq;
3950 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
3951 dd_data->context_un.mbox.set_job = job;
3952 job->dd_data = dd_data;
3953
3954 /* state change */
3955 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3956
3957 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3958 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3959 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3960 "2969 Issued SLI_CONFIG ext-buffer "
3961 "maibox command, rc:x%x\n", rc);
3962 return 1;
3963 }
3964 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3965 "2970 Failed to issue SLI_CONFIG ext-buffer "
3966 "maibox command, rc:x%x\n", rc);
3967 rc = -EPIPE;
3968 goto job_error;
3969 }
3970
3971 /* wait for additional external buffers */
3972 job->reply->result = 0;
3973 job->job_done(job);
3974 return SLI_CONFIG_HANDLED;
3975
3976job_error:
3977 lpfc_bsg_dma_page_free(phba, dmabuf);
3978 kfree(dd_data);
3979
3980 return rc;
3981}
3982
3983/**
3984 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
3985 * @phba: Pointer to HBA context object.
3986 * @job: Pointer to the fc_bsg_job object.
3987 * @dmabuf: Pointer to a DMA buffer descriptor.
3988 *
3989 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
3990 * command with multiple non-embedded external buffers.
3991 **/
3992static int
3993lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
3994 struct lpfc_dmabuf *dmabuf)
3995{
3996 int rc;
3997
3998 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3999 "2971 SLI_CONFIG buffer (type:x%x)\n",
4000 phba->mbox_ext_buf_ctx.mboxType);
4001
4002 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4003 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4004 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4005 "2972 SLI_CONFIG rd buffer state "
4006 "mismatch:x%x\n",
4007 phba->mbox_ext_buf_ctx.state);
4008 lpfc_bsg_mbox_ext_abort(phba);
4009 return -EPIPE;
4010 }
4011 rc = lpfc_bsg_read_ebuf_get(phba, job);
4012 if (rc == SLI_CONFIG_HANDLED)
4013 lpfc_bsg_dma_page_free(phba, dmabuf);
4014 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4015 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4016 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4017 "2973 SLI_CONFIG wr buffer state "
4018 "mismatch:x%x\n",
4019 phba->mbox_ext_buf_ctx.state);
4020 lpfc_bsg_mbox_ext_abort(phba);
4021 return -EPIPE;
4022 }
4023 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4024 }
4025 return rc;
4026}
4027
4028/**
4029 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4030 * @phba: Pointer to HBA context object.
4031 * @job: Pointer to the fc_bsg_job object.
4032 * @dmabuf: Pointer to a DMA buffer descriptor.
4033 *
4034 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4035 * (0x9B) mailbox commands and external buffers.
4036 **/
4037static int
4038lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4039 struct lpfc_dmabuf *dmabuf)
4040{
4041 struct dfc_mbox_req *mbox_req;
4042 int rc;
4043
4044 mbox_req =
4045 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4046
4047 /* mbox command with/without single external buffer */
4048 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4049 return SLI_CONFIG_NOT_HANDLED;
4050
4051 /* mbox command and first external buffer */
4052 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4053 if (mbox_req->extSeqNum == 1) {
4054 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4055 "2974 SLI_CONFIG mailbox: tag:%d, "
4056 "seq:%d\n", mbox_req->extMboxTag,
4057 mbox_req->extSeqNum);
4058 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4059 return rc;
4060 } else
4061 goto sli_cfg_ext_error;
4062 }
4063
4064 /*
4065 * handle additional external buffers
4066 */
4067
4068 /* check broken pipe conditions */
4069 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4070 goto sli_cfg_ext_error;
4071 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4072 goto sli_cfg_ext_error;
4073 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4074 goto sli_cfg_ext_error;
4075
4076 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4077 "2975 SLI_CONFIG mailbox external buffer: "
4078 "extSta:x%x, tag:%d, seq:%d\n",
4079 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4080 mbox_req->extSeqNum);
4081 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4082 return rc;
4083
4084sli_cfg_ext_error:
4085 /* all other cases, broken pipe */
4086 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4087 "2976 SLI_CONFIG mailbox broken pipe: "
4088 "ctxSta:x%x, ctxNumBuf:%d "
4089 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4090 phba->mbox_ext_buf_ctx.state,
4091 phba->mbox_ext_buf_ctx.numBuf,
4092 phba->mbox_ext_buf_ctx.mbxTag,
4093 phba->mbox_ext_buf_ctx.seqNum,
4094 mbox_req->extMboxTag, mbox_req->extSeqNum);
4095
4096 lpfc_bsg_mbox_ext_session_reset(phba);
4097
4098 return -EPIPE;
4099}
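
From user space, the protocol enforced above amounts to one BSG vendor request per buffer: a non-zero extMboxTag held constant for the whole session and an extSeqNum that starts at 1 and advances by exactly one. The fragment below sketches that sequencing for a 3-buffer session; send_mbox_bsg_request() and fd are hypothetical stand-ins, not part of any driver or library API.

/* Hypothetical user-space flow: same tag throughout, sequence numbers
 * 1..numBuf, matching the broken-pipe checks in the routine above.
 */
struct dfc_mbox_req req = { .command = LPFC_BSG_VENDOR_MBOX };
uint32_t seq;

req.extMboxTag = 5;		/* any non-zero session tag */
for (seq = 1; seq <= 3; seq++) {
	req.extSeqNum = seq;	/* must advance by exactly one */
	send_mbox_bsg_request(fd, &req);	/* hypothetical helper */
}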
4100
4101/**
2622 4102 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2623 4103 * @phba: Pointer to HBA context object.
2624 4104 * @mb: Pointer to a mailbox object.
@@ -2638,22 +4118,21 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2638 4118 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
2639 4119 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
2640 4120 /* a 4k buffer to hold the mb and extended data from/to the bsg */
2641 MAILBOX_t *mb = NULL;
4121 uint8_t *pmbx = NULL;
2642 4122 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
2643 uint32_t size;
4123 struct lpfc_dmabuf *dmabuf = NULL;
2644 struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
4124 struct dfc_mbox_req *mbox_req;
2645 struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
2646 struct ulp_bde64 *rxbpl = NULL;
2647 struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
2648 job->request->rqst_data.h_vendor.vendor_cmd;
2649 4125 struct READ_EVENT_LOG_VAR *rdEventLog;
2650 4126 uint32_t transmit_length, receive_length, mode;
4127 struct lpfc_mbx_sli4_config *sli4_config;
2651 4128 struct lpfc_mbx_nembed_cmd *nembed_sge;
2652 4129 struct mbox_header *header;
2653 4130 struct ulp_bde64 *bde;
2654 4131 uint8_t *ext = NULL;
2655 4132 int rc = 0;
2656 4133 uint8_t *from;
4134 uint32_t size;
4135
2657 4136
2658 4137 /* in case no data is transferred */
2659 4138 job->reply->reply_payload_rcv_len = 0;
@@ -2665,6 +4144,18 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2665 4144 goto job_done;
2666 4145 }
2667 4146
4147 /*
4148 * Don't allow mailbox commands to be sent when blocked or when in
4149 * the middle of discovery
4150 */
4151 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4152 rc = -EAGAIN;
4153 goto job_done;
4154 }
4155
4156 mbox_req =
4157 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4158
2668 4159 /* check if requested extended data lengths are valid */
2669 4160 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
2670 4161 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
@@ -2672,6 +4163,32 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2672 4163 goto job_done;
2673 4164 }
2674 4165
4166 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4167 if (!dmabuf || !dmabuf->virt) {
4168 rc = -ENOMEM;
4169 goto job_done;
4170 }
4171
4172 /* Get the mailbox command or external buffer from BSG */
4173 pmbx = (uint8_t *)dmabuf->virt;
4174 size = job->request_payload.payload_len;
4175 sg_copy_to_buffer(job->request_payload.sg_list,
4176 job->request_payload.sg_cnt, pmbx, size);
4177
4178 /* Handle possible SLI_CONFIG with non-embedded payloads */
4179 if (phba->sli_rev == LPFC_SLI_REV4) {
4180 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4181 if (rc == SLI_CONFIG_HANDLED)
4182 goto job_cont;
4183 if (rc)
4184 goto job_done;
4185 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4186 }
4187
4188 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4189 if (rc != 0)
4190 goto job_done; /* must be negative */
4191
2675 4192 /* allocate our bsg tracking structure */
2676 4193 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2677 4194 if (!dd_data) {
@@ -2681,12 +4198,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2681 4198 goto job_done;
2682 4199 }
2683 4200
2684 mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
2685 if (!mb) {
2686 rc = -ENOMEM;
2687 goto job_done;
2688 }
2689
2690 4201 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2691 4202 if (!pmboxq) {
2692 4203 rc = -ENOMEM;
@@ -2694,17 +4205,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2694 4205 }
2695 4206 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2696 4207
2697 size = job->request_payload.payload_len;
2698 sg_copy_to_buffer(job->request_payload.sg_list,
2699 job->request_payload.sg_cnt,
2700 mb, size);
2701
2702 rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2703 if (rc != 0)
2704 goto job_done; /* must be negative */
2705
2706 4208 pmb = &pmboxq->u.mb;
2707 memcpy(pmb, mb, sizeof(*pmb));
4209 memcpy(pmb, pmbx, sizeof(*pmb));
2708 4210 pmb->mbxOwner = OWN_HOST;
2709 4211 pmboxq->vport = vport;
2710 4212
@@ -2721,30 +4223,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2721 "0x%x while in stopped state.\n", 4223 "0x%x while in stopped state.\n",
2722 pmb->mbxCommand); 4224 pmb->mbxCommand);
2723 4225
2724 /* Don't allow mailbox commands to be sent when blocked
2725 * or when in the middle of discovery
2726 */
2727 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2728 rc = -EAGAIN;
2729 goto job_done;
2730 }
2731
2732 4226 /* extended mailbox commands will need an extended buffer */
2733 4227 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
2734 ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
2735 if (!ext) {
2736 rc = -ENOMEM;
2737 goto job_done;
2738 }
2739
2740 4228 /* any data for the device? */
2741 4229 if (mbox_req->inExtWLen) {
2742 from = (uint8_t *)mb;
4230 from = pmbx;
2743 from += sizeof(MAILBOX_t);
4231 ext = from + sizeof(MAILBOX_t);
2744 memcpy((uint8_t *)ext, from,
2745 mbox_req->inExtWLen * sizeof(uint32_t));
2746 4232 }
2747
2748 4233 pmboxq->context2 = ext;
2749 4234 pmboxq->in_ext_byte_len =
2750 4235 mbox_req->inExtWLen * sizeof(uint32_t);
@@ -2768,46 +4253,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2768 4253 rc = -ERANGE;
2769 4254 goto job_done;
2770 4255 }
2771
2772 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2773 if (!rxbmp) {
2774 rc = -ENOMEM;
2775 goto job_done;
2776 }
2777
2778 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2779 if (!rxbmp->virt) {
2780 rc = -ENOMEM;
2781 goto job_done;
2782 }
2783
2784 INIT_LIST_HEAD(&rxbmp->list);
2785 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2786 dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
2787 if (!dmp) {
2788 rc = -ENOMEM;
2789 goto job_done;
2790 }
2791
2792 INIT_LIST_HEAD(&dmp->dma.list);
2793 4256 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
2794 putPaddrHigh(dmp->dma.phys);
4257 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
2795 4258 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
2796 putPaddrLow(dmp->dma.phys);
4259 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
2797 4260
2798 4261 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
2799 putPaddrHigh(dmp->dma.phys +
4262 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
2800 pmb->un.varBIUdiag.un.s2.
4263 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
2801 xmit_bde64.tus.f.bdeSize);
2802 4264 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
2803 putPaddrLow(dmp->dma.phys +
4265 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
2804 pmb->un.varBIUdiag.un.s2.
4266 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
2805 xmit_bde64.tus.f.bdeSize);
2806
2807 /* copy the transmit data found in the mailbox extension area */
2808 from = (uint8_t *)mb;
2809 from += sizeof(MAILBOX_t);
2810 memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
2811 4267 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
2812 4268 rdEventLog = &pmb->un.varRdEventLog;
2813 4269 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
@@ -2823,33 +4279,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2823 4279
2824 4280 /* mode zero uses a bde like biu diags command */
2825 4281 if (mode == 0) {
2826
4282 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
2827 /* rebuild the command for sli4 using our own buffers
4283 + sizeof(MAILBOX_t));
2828 * like we do for biu diags
4284 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
2829 */
4285 + sizeof(MAILBOX_t));
2830
2831 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2832 if (!rxbmp) {
2833 rc = -ENOMEM;
2834 goto job_done;
2835 }
2836
2837 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2838 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2839 if (rxbpl) {
2840 INIT_LIST_HEAD(&rxbmp->list);
2841 dmp = diag_cmd_data_alloc(phba, rxbpl,
2842 receive_length, 0);
2843 }
2844
2845 if (!dmp) {
2846 rc = -ENOMEM;
2847 goto job_done;
2848 }
2849
2850 INIT_LIST_HEAD(&dmp->dma.list);
2851 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2852 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2853 4286 }
2854 4287 } else if (phba->sli_rev == LPFC_SLI_REV4) {
2855 4288 if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
@@ -2860,36 +4293,14 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2860 4293 /* receive length cannot be greater than mailbox
2861 4294 * extension size
2862 4295 */
2863 if ((receive_length == 0) ||
4296 if (receive_length == 0) {
2864 (receive_length > MAILBOX_EXT_SIZE)) {
2865 4297 rc = -ERANGE;
2866 4298 goto job_done;
2867 4299 }
2868
4300 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
2869 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4301 + sizeof(MAILBOX_t));
2870 if (!rxbmp) {
4302 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
2871 rc = -ENOMEM;
4303 + sizeof(MAILBOX_t));
2872 goto job_done;
2873 }
2874
2875 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2876 if (!rxbmp->virt) {
2877 rc = -ENOMEM;
2878 goto job_done;
2879 }
2880
2881 INIT_LIST_HEAD(&rxbmp->list);
2882 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2883 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
2884 0);
2885 if (!dmp) {
2886 rc = -ENOMEM;
2887 goto job_done;
2888 }
2889
2890 INIT_LIST_HEAD(&dmp->dma.list);
2891 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2892 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2893 4304 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
2894 4305 pmb->un.varUpdateCfg.co) {
2895 4306 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
@@ -2899,102 +4310,53 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2899 4310 rc = -ERANGE;
2900 4311 goto job_done;
2901 4312 }
2902
4313 bde->addrHigh = putPaddrHigh(dmabuf->phys
2903 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4314 + sizeof(MAILBOX_t));
2904 if (!rxbmp) {
4315 bde->addrLow = putPaddrLow(dmabuf->phys
2905 rc = -ENOMEM;
4316 + sizeof(MAILBOX_t));
2906 goto job_done;
2907 }
2908
2909 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2910 if (!rxbmp->virt) {
2911 rc = -ENOMEM;
2912 goto job_done;
2913 }
2914
2915 INIT_LIST_HEAD(&rxbmp->list);
2916 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2917 dmp = diag_cmd_data_alloc(phba, rxbpl,
2918 bde->tus.f.bdeSize, 0);
2919 if (!dmp) {
2920 rc = -ENOMEM;
2921 goto job_done;
2922 }
2923
2924 INIT_LIST_HEAD(&dmp->dma.list);
2925 bde->addrHigh = putPaddrHigh(dmp->dma.phys);
2926 bde->addrLow = putPaddrLow(dmp->dma.phys);
2927
2928 /* copy the transmit data found in the mailbox
2929 * extension area
2930 */
2931 from = (uint8_t *)mb;
2932 from += sizeof(MAILBOX_t);
2933 memcpy((uint8_t *)dmp->dma.virt, from,
2934 bde->tus.f.bdeSize);
2935 4317 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
2936 /* rebuild the command for sli4 using our own buffers
4318 /* Handling non-embedded SLI_CONFIG mailbox command */
2937 * like we do for biu diags
4319 sli4_config = &pmboxq->u.mqe.un.sli4_config;
2938 */
4320 if (!bf_get(lpfc_mbox_hdr_emb,
2939 header = (struct mbox_header *)&pmb->un.varWords[0];
4321 &sli4_config->header.cfg_mhdr)) {
2940 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4322 /* rebuild the command for sli4 using our
2941 &pmb->un.varWords[0];
4323 * own buffers like we do for biu diags
2942 receive_length = nembed_sge->sge[0].length;
4324 */
2943
4325 header = (struct mbox_header *)
2944 /* receive length cannot be greater than mailbox
4326 &pmb->un.varWords[0];
2945 * extension size
4327 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
2946 */
4328 &pmb->un.varWords[0];
2947 if ((receive_length == 0) ||
4329 receive_length = nembed_sge->sge[0].length;
2948 (receive_length > MAILBOX_EXT_SIZE)) {
4330
2949 rc = -ERANGE;
4331 /* receive length cannot be greater than
2950 goto job_done;
4332 * mailbox extension size
2951 }
4333 */
2952
4334 if ((receive_length == 0) ||
2953 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4335 (receive_length > MAILBOX_EXT_SIZE)) {
2954 if (!rxbmp) {
4336 rc = -ERANGE;
2955 rc = -ENOMEM;
4337 goto job_done;
2956 goto job_done;
4338 }
2957 }
2958
2959 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2960 if (!rxbmp->virt) {
2961 rc = -ENOMEM;
2962 goto job_done;
2963 }
2964 4339
2965 INIT_LIST_HEAD(&rxbmp->list);
4340 nembed_sge->sge[0].pa_hi =
2966 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
4341 putPaddrHigh(dmabuf->phys
2967 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
4342 + sizeof(MAILBOX_t));
2968 0);
4343 nembed_sge->sge[0].pa_lo =
2969 if (!dmp) {
4344 putPaddrLow(dmabuf->phys
2970 rc = -ENOMEM;
4345 + sizeof(MAILBOX_t));
2971 goto job_done;
2972 4346 }
2973
2974 INIT_LIST_HEAD(&dmp->dma.list);
2975 nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
2976 nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
2977 /* copy the transmit data found in the mailbox
2978 * extension area
2979 */
2980 from = (uint8_t *)mb;
2981 from += sizeof(MAILBOX_t);
2982 memcpy((uint8_t *)dmp->dma.virt, from,
2983 header->cfg_mhdr.payload_length);
2984 4347 }
2985 4348 }
2986 4349
2987 dd_data->context_un.mbox.rxbmp = rxbmp;
4350 dd_data->context_un.mbox.dmabuffers = dmabuf;
2988 dd_data->context_un.mbox.dmp = dmp;
2989 4351
2990 4352 /* setup wake call as IOCB callback */
2991 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
4353 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
2992 4354
2993 4355 /* setup context field to pass wait_queue pointer to wake function */
2994 4356 pmboxq->context1 = dd_data;
2995 4357 dd_data->type = TYPE_MBOX;
2996 4358 dd_data->context_un.mbox.pmboxq = pmboxq;
2997 dd_data->context_un.mbox.mb = mb;
4359 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
2998 4360 dd_data->context_un.mbox.set_job = job;
2999 4361 dd_data->context_un.mbox.ext = ext;
3000 4362 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
@@ -3011,11 +4373,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3011 4373 }
3012 4374
3013 4375 /* job finished, copy the data */
3014 memcpy(mb, pmb, sizeof(*pmb));
4376 memcpy(pmbx, pmb, sizeof(*pmb));
3015 4377 job->reply->reply_payload_rcv_len =
3016 4378 sg_copy_from_buffer(job->reply_payload.sg_list,
3017 4379 job->reply_payload.sg_cnt,
3018 mb, size);
4380 pmbx, size);
3019 4381 /* not waiting mbox already done */
3020 4382 rc = 0;
3021 4383 goto job_done;
@@ -3027,22 +4389,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3027 4389
3028 4390 job_done:
3029 4391 /* common exit for error or job completed inline */
3030 kfree(mb);
3031 4392 if (pmboxq)
3032 4393 mempool_free(pmboxq, phba->mbox_mem_pool);
3033 kfree(ext);
4394 lpfc_bsg_dma_page_free(phba, dmabuf);
3034 if (dmp) {
3035 dma_free_coherent(&phba->pcidev->dev,
3036 dmp->size, dmp->dma.virt,
3037 dmp->dma.phys);
3038 kfree(dmp);
3039 }
3040 if (rxbmp) {
3041 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
3042 kfree(rxbmp);
3043 }
3044 4395 kfree(dd_data);
3045 4396
4397job_cont:
3046 4398 return rc;
3047 4399 }
3048 4400
@@ -3055,37 +4407,28 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
3055 4407 {
3056 4408 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3057 4409 struct lpfc_hba *phba = vport->phba;
4410 struct dfc_mbox_req *mbox_req;
3058 4411 int rc = 0;
3059 4412
3060 /* in case no data is transferred */
4413 /* mix-and-match backward compatibility */
3061 4414 job->reply->reply_payload_rcv_len = 0;
3062 4415 if (job->request_len <
3063 4416 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
3064 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4417 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3065 "2737 Received MBOX_REQ request below "
4418 "2737 Mix-and-match backward compatibility "
3066 "minimum size\n");
4419 "between MBOX_REQ old size:%d and "
3067 rc = -EINVAL;
4420 "new request size:%d\n",
3068 goto job_error;
4421 (int)(job->request_len -
3069 }
4422 sizeof(struct fc_bsg_request)),
3070
4423 (int)sizeof(struct dfc_mbox_req));
3071 if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
4424 mbox_req = (struct dfc_mbox_req *)
3072 rc = -EINVAL;
4425 job->request->rqst_data.h_vendor.vendor_cmd;
3073 goto job_error;
4426 mbox_req->extMboxTag = 0;
3074 }
4427 mbox_req->extSeqNum = 0;
3075
3076 if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
3077 rc = -EINVAL;
3078 goto job_error;
3079 }
3080
3081 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
3082 rc = -EAGAIN;
3083 goto job_error;
3084 4428 }
3085 4429
3086 4430 rc = lpfc_bsg_issue_mbox(phba, job, vport);
3087 4431
3088job_error:
3089 4432 if (rc == 0) {
3090 4433 /* job done */
3091 4434 job->reply->result = 0;
@@ -3416,10 +4759,16 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
3416 4759 rc = lpfc_bsg_send_mgmt_rsp(job);
3417 4760 break;
3418 4761 case LPFC_BSG_VENDOR_DIAG_MODE:
3419 rc = lpfc_bsg_diag_mode(job);
4762 rc = lpfc_bsg_diag_loopback_mode(job);
4763 break;
4764 case LPFC_BSG_VENDOR_DIAG_MODE_END:
4765 rc = lpfc_sli4_bsg_diag_mode_end(job);
4766 break;
4767 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
4768 rc = lpfc_bsg_diag_loopback_run(job);
3420 4769 break;
3421 case LPFC_BSG_VENDOR_DIAG_TEST:
4770 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
3422 rc = lpfc_bsg_diag_test(job);
4771 rc = lpfc_sli4_bsg_link_diag_test(job);
3423 4772 break;
3424 4773 case LPFC_BSG_VENDOR_GET_MGMT_REV:
3425 4774 rc = lpfc_bsg_get_dfc_rev(job);
@@ -3538,6 +4887,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
3538 4887 /* the mbox completion handler can now be run */
3539 4888 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3540 4889 job->job_done(job);
4890 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4891 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
3541 4892 break;
3542 4893 case TYPE_MENLO:
3543 4894 menlo = &dd_data->context_un.menlo;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index b542aca6f5ae..c8c2b47ea886 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -24,15 +24,17 @@
24 24 * These are the vendor unique structures passed in using the bsg
25 25 * FC_BSG_HST_VENDOR message code type.
26 26 */
27 27 #define LPFC_BSG_VENDOR_SET_CT_EVENT 1
28 28 #define LPFC_BSG_VENDOR_GET_CT_EVENT 2
29 29 #define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
30 30 #define LPFC_BSG_VENDOR_DIAG_MODE 4
31 #define LPFC_BSG_VENDOR_DIAG_TEST 5
31 #define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
32 32 #define LPFC_BSG_VENDOR_GET_MGMT_REV 6
33 33 #define LPFC_BSG_VENDOR_MBOX 7
34 34 #define LPFC_BSG_VENDOR_MENLO_CMD 8
35 35 #define LPFC_BSG_VENDOR_MENLO_DATA 9
36#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
37#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
36 38
37 39 struct set_ct_event {
38 40 uint32_t command;
@@ -67,10 +69,25 @@ struct diag_mode_set {
67 69 uint32_t timeout;
68 70 };
69 71
72struct sli4_link_diag {
73 uint32_t command;
74 uint32_t timeout;
75 uint32_t test_id;
76 uint32_t loops;
77 uint32_t test_version;
78 uint32_t error_action;
79};
80
70 81 struct diag_mode_test {
71 82 uint32_t command;
72 83 };
73 84
85struct diag_status {
86 uint32_t mbox_status;
87 uint32_t shdr_status;
88 uint32_t shdr_add_status;
89};
90
74 91 #define LPFC_WWNN_TYPE 0
75 92 #define LPFC_WWPN_TYPE 1
76 93
@@ -92,11 +109,15 @@ struct get_mgmt_rev_reply {
92 109 };
93 110
94 111 #define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
112
113/* BSG mailbox request header */
95 114 struct dfc_mbox_req {
96 115 uint32_t command;
97 116 uint32_t mbOffset;
98 117 uint32_t inExtWLen;
99 118 uint32_t outExtWLen;
119 uint32_t extMboxTag;
120 uint32_t extSeqNum;
100 121 };
101 122
102 123 /* Used for menlo command or menlo data. The xri is only used for menlo data */
@@ -171,7 +192,7 @@ struct lpfc_sli_config_mse {
171 192 #define lpfc_mbox_sli_config_mse_len_WORD buf_len
172 193 };
173 194
174 struct lpfc_sli_config_subcmd_hbd {
195 struct lpfc_sli_config_hbd {
175 196 uint32_t buf_len;
176 197 #define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0
177 198 #define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff
@@ -194,21 +215,39 @@ struct lpfc_sli_config_hdr {
194 215 uint32_t reserved5;
195 216 };
196 217
197 struct lpfc_sli_config_generic {
218 struct lpfc_sli_config_emb0_subsys {
198 219 struct lpfc_sli_config_hdr sli_config_hdr;
199 220 #define LPFC_MBX_SLI_CONFIG_MAX_MSE 19
200 221 struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
222 uint32_t padding;
223 uint32_t word64;
224#define lpfc_emb0_subcmnd_opcode_SHIFT 0
225#define lpfc_emb0_subcmnd_opcode_MASK 0xff
226#define lpfc_emb0_subcmnd_opcode_WORD word64
227#define lpfc_emb0_subcmnd_subsys_SHIFT 8
228#define lpfc_emb0_subcmnd_subsys_MASK 0xff
229#define lpfc_emb0_subcmnd_subsys_WORD word64
230/* Subsystem FCOE (0x0C) OpCodes */
231#define SLI_CONFIG_SUBSYS_FCOE 0x0C
232#define FCOE_OPCODE_READ_FCF 0x08
233#define FCOE_OPCODE_ADD_FCF 0x09
201 234 };
202 235
203 struct lpfc_sli_config_subcmnd {
236 struct lpfc_sli_config_emb1_subsys {
204 237 struct lpfc_sli_config_hdr sli_config_hdr;
205 238 uint32_t word6;
206 #define lpfc_subcmnd_opcode_SHIFT 0
239 #define lpfc_emb1_subcmnd_opcode_SHIFT 0
207 #define lpfc_subcmnd_opcode_MASK 0xff
240 #define lpfc_emb1_subcmnd_opcode_MASK 0xff
208 #define lpfc_subcmnd_opcode_WORD word6
241 #define lpfc_emb1_subcmnd_opcode_WORD word6
209 #define lpfc_subcmnd_subsys_SHIFT 8
242 #define lpfc_emb1_subcmnd_subsys_SHIFT 8
210 #define lpfc_subcmnd_subsys_MASK 0xff
243 #define lpfc_emb1_subcmnd_subsys_MASK 0xff
211 #define lpfc_subcmnd_subsys_WORD word6
244 #define lpfc_emb1_subcmnd_subsys_WORD word6
245/* Subsystem COMN (0x01) OpCodes */
246#define SLI_CONFIG_SUBSYS_COMN 0x01
247#define COMN_OPCODE_READ_OBJECT 0xAB
248#define COMN_OPCODE_WRITE_OBJECT 0xAC
249#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
250#define COMN_OPCODE_DELETE_OBJECT 0xAE
212 251 uint32_t timeout;
213 252 uint32_t request_length;
214 253 uint32_t word9;
@@ -222,8 +261,8 @@ struct lpfc_sli_config_subcmnd {
222 261 uint32_t rd_offset;
223 262 uint32_t obj_name[26];
224 263 uint32_t hbd_count;
225 #define LPFC_MBX_SLI_CONFIG_MAX_HBD 10
264 #define LPFC_MBX_SLI_CONFIG_MAX_HBD 8
226 struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
265 struct lpfc_sli_config_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
227 266 };
228 267
229 268 struct lpfc_sli_config_mbox {
@@ -235,7 +274,11 @@ struct lpfc_sli_config_mbox {
235 274 #define lpfc_mqe_command_MASK 0x000000FF
236 275 #define lpfc_mqe_command_WORD word0
237 276 union {
238 struct lpfc_sli_config_generic sli_config_generic;
277 struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
239 struct lpfc_sli_config_subcmnd sli_config_subcmnd;
278 struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
240 279 } un;
241 280 };
281
282/* driver only */
283#define SLI_CONFIG_NOT_HANDLED 0
284#define SLI_CONFIG_HANDLED 1
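A note for readers on the convention at work in the hunks above (and throughout these headers): every register field is described by a name##_SHIFT/_MASK/_WORD macro triplet and consumed through token-pasting accessors. The sketch below is modeled on the driver's bf_get/bf_set idiom and is included only for illustration; it is not part of this patch.

/* Illustrative sketch of the accessor idiom the _SHIFT/_MASK/_WORD
 * triplets assume: read or write one field of the named word. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

With the new definitions, a caller building an embedded FCoE SLI_CONFIG request would then write something like bf_set(lpfc_emb0_subcmnd_subsys, emb0, SLI_CONFIG_SUBSYS_FCOE) followed by bf_set(lpfc_emb0_subcmnd_opcode, emb0, FCOE_OPCODE_READ_FCF), where emb0 points at a struct lpfc_sli_config_emb0_subsys.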
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f0b332f4eedb..fc20c247f36b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,6 +55,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_supported_pages(struct lpfcMboxq *);
 void lpfc_pc_sli4_params(struct lpfcMboxq *);
 int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+			   uint16_t, uint16_t, bool);
 int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
@@ -171,6 +173,7 @@ void lpfc_delayed_disc_tmo(unsigned long);
 void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
 
 int lpfc_config_port_prep(struct lpfc_hba *);
+void lpfc_update_vport_wwn(struct lpfc_vport *vport);
 int lpfc_config_port_post(struct lpfc_hba *);
 int lpfc_hba_down_prep(struct lpfc_hba *);
 int lpfc_hba_down_post(struct lpfc_hba *);
@@ -365,6 +368,10 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
 	uint32_t, uint32_t);
 extern struct lpfc_hbq_init *lpfc_hbq_defs[];
 
+/* SLI4 if_type 2 externs. */
+int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+
 /* externs BlockGuard */
 extern char *_dump_buf_data;
 extern unsigned long _dump_buf_data_order;
@@ -429,3 +436,6 @@ void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
 void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
 	uint32_t);
+int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
+/* functions to support SR-IOV */
+int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d9edfd90d7ff..779b88e1469d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -352,6 +352,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	icmd->ulpLe = 1;
 	icmd->ulpClass = CLASS3;
 	icmd->ulpContext = ndlp->nlp_rpi;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
 
 	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
 		/* For GEN_REQUEST64_CR, use the RPI */
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c93fca058603..ffe82d169b40 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1665,7 +1665,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 	/* Get fast-path complete queue information */
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 			"Fast-path FCP CQ information:\n");
-	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+	fcp_qidx = 0;
+	do {
 		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
 				"Associated EQID[%02d]:\n",
 				phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
@@ -1678,7 +1679,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 				phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
 				phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
 				phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
-	}
+	} while (++fcp_qidx < phba->cfg_fcp_eq_count);
 	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 
 	/* Get mailbox queue information */
@@ -2012,7 +2013,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			goto pass_check;
 		}
 		/* FCP complete queue */
-		for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+		qidx = 0;
+		do {
 			if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
 				/* Sanity check */
 				rc = lpfc_idiag_que_param_check(
@@ -2024,7 +2026,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 					phba->sli4_hba.fcp_cq[qidx];
 				goto pass_check;
 			}
-		}
+		} while (++qidx < phba->cfg_fcp_eq_count);
 		goto error_out;
 		break;
 	case LPFC_IDIAG_MQ:
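The two conversions above replace zero-or-more for loops with at-least-once do/while loops; the apparent rationale (an inference from the change, not stated in the patch) is that one fast-path FCP CQ exists even when cfg_fcp_eq_count is 0. A minimal standalone reduction of the behavioral difference:

#include <stdio.h>

int main(void)
{
	int count = 0, visits_for = 0, visits_do = 0, i;

	/* for: condition is checked first, body never runs when count == 0 */
	for (i = 0; i < count; i++)
		visits_for++;

	/* do/while: body runs before the test, so index 0 is visited once */
	i = 0;
	do {
		visits_do++;
	} while (++i < count);

	printf("for: %d, do/while: %d\n", visits_for, visits_do); /* 0, 1 */
	return 0;
}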
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e2c452467c8b..32a084534f3e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -250,7 +250,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 	icmd->un.elsreq64.myID = vport->fc_myDID;
 
 	/* For ELS_REQUEST64_CR, use the VPI by default */
-	icmd->ulpContext = vport->vpi + phba->vpi_base;
+	icmd->ulpContext = phba->vpi_ids[vport->vpi];
 	icmd->ulpCt_h = 0;
 	/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
 	if (elscmd == ELS_CMD_ECHO)
@@ -454,6 +454,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
 		rc = -ENOMEM;
 		goto fail_free_dmabuf;
 	}
+
 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mboxq) {
 		rc = -ENOMEM;
@@ -6585,6 +6586,26 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
 {
 	struct lpfc_vport *vport;
 	unsigned long flags;
+	int i;
+
+	/* The physical ports are always vpi 0 - translate is unnecessary. */
+	if (vpi > 0) {
+		/*
+		 * Translate the physical vpi to the logical vpi.  The
+		 * vport stores the logical vpi.
+		 */
+		for (i = 0; i < phba->max_vpi; i++) {
+			if (vpi == phba->vpi_ids[i])
+				break;
+		}
+
+		if (i >= phba->max_vpi) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+					"2936 Could not find Vport mapped "
+					"to vpi %d\n", vpi);
+			return NULL;
+		}
+	}
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	list_for_each_entry(vport, &phba->port_list, listentry) {
@@ -6641,8 +6662,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			vport = phba->pport;
 		else
 			vport = lpfc_find_vport_by_vpid(phba,
-				icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
+				icmd->unsli3.rcvsli3.vpi);
 	}
+
 	/* If there are no BDEs associated
 	 * with this IOCB, there is nothing to do.
 	 */
@@ -7222,7 +7244,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
 		elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
 		/* Set the ulpContext to the vpi */
-		elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
+		elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
 	} else {
 		/* For FDISC, Let FDISC rsp set the NPortID for this VPI */
 		icmd->ulpCt_h = 1;
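The new block in lpfc_find_vport_by_vpid() above translates a physical vpi back to the logical vpi a vport stores by scanning the phba->vpi_ids[] forward map. The shape of that reverse lookup, reduced to a self-contained sketch (the helper name and parameters here are hypothetical, for illustration only):

#include <stdint.h>

/* ids[logical] holds the physical id; return the logical index that
 * maps to 'phys', or -1 when no entry matches (cf. the hunk above). */
static int logical_from_physical(const uint16_t *ids, int max, uint16_t phys)
{
	int i;

	for (i = 0; i < max; i++)
		if (ids[i] == phys)
			return i;
	return -1;
}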
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7a35df5e2038..18d0dbfda2bc 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -881,7 +881,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
 		/* Clean up any firmware default rpi's */
 		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mb) {
-			lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
+			lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
 			mb->vport = vport;
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
@@ -2690,16 +2690,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
 	       sizeof (struct serv_parm));
-	if (phba->cfg_soft_wwnn)
-		u64_to_wwn(phba->cfg_soft_wwnn,
-			   vport->fc_sparam.nodeName.u.wwn);
-	if (phba->cfg_soft_wwpn)
-		u64_to_wwn(phba->cfg_soft_wwpn,
-			   vport->fc_sparam.portName.u.wwn);
-	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
-	       sizeof(vport->fc_nodename));
-	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
-	       sizeof(vport->fc_portname));
+	lpfc_update_vport_wwn(vport);
 	if (vport->port_type == LPFC_PHYSICAL_PORT) {
 		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
 		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3430,7 +3421,8 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		return;
 	}
 
-	ndlp->nlp_rpi = mb->un.varWords[0];
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3504,7 +3496,8 @@ out:
 		return;
 	}
 
-	ndlp->nlp_rpi = mb->un.varWords[0];
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -3591,7 +3584,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
 		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
 
-
 	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
 		fc_remote_port_rolechg(rport, rport_ids.roles);
 
@@ -4106,11 +4098,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	struct lpfc_hba *phba = vport->phba;
 	LPFC_MBOXQ_t *mbox;
 	int rc;
+	uint16_t rpi;
 
 	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mbox) {
-			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
+			/* SLI4 ports require the physical rpi value. */
+			rpi = ndlp->nlp_rpi;
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
 			mbox->vport = vport;
 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4179,7 +4176,8 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (mbox) {
-		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
+		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
+				 mbox);
 		mbox->vport = vport;
 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		mbox->context1 = NULL;
@@ -4203,7 +4201,8 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (mbox) {
-		lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
+		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+				mbox);
 		mbox->vport = vport;
 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		mbox->context1 = NULL;
@@ -4653,10 +4652,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
 	if (num_sent)
 		return;
 
-	/*
-	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
-	 * continue discovery.
-	 */
+	/* Register the VPI for SLI3, NON-NPIV only. */
 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 	    !(vport->fc_flag & FC_PT2PT) &&
 	    !(vport->fc_flag & FC_RSCN_MODE) &&
@@ -4943,7 +4939,7 @@ restart_disc:
 		if (phba->sli_rev < LPFC_SLI_REV4) {
 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
 				lpfc_issue_reg_vpi(phba, vport);
-			else {	/* NPIV Not enabled */
+			else {
 				lpfc_issue_clear_la(phba, vport);
 				vport->port_state = LPFC_VPORT_READY;
 			}
@@ -5069,7 +5065,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	pmb->context1 = NULL;
 	pmb->context2 = NULL;
 
-	ndlp->nlp_rpi = mb->un.varWords[0];
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -5354,6 +5351,17 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		shost = lpfc_shost_from_vport(vports[i]);
 		spin_lock_irq(shost->host_lock);
+		/*
+		 * If the CVL_RCVD bit is not set then we have sent the flogi.
+		 * If dev_loss fires while we are waiting we do not want to
+		 * unreg the fcf.
+		 */
+		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
+			spin_unlock_irq(shost->host_lock);
+			ret = 1;
+			goto out;
+		}
 		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
 			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
 			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 86b6f7e6686a..9059524cf225 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,8 @@
 #define SLI3_IOCB_CMD_SIZE	128
 #define SLI3_IOCB_RSP_SIZE	64
 
+#define LPFC_UNREG_ALL_RPIS_VPORT	0xffff
+#define LPFC_UNREG_ALL_DFLT_RPIS	0xffffffff
 
 /* vendor ID used in SCSI netlink calls */
 #define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
@@ -903,6 +905,8 @@ struct RRQ { /* Structure is in Big Endian format */
 #define rrq_rxid_WORD		rrq_exchg
 };
 
+#define LPFC_MAX_VFN_PER_PFN	255 /* Maximum VFs allowed per ARI */
+#define LPFC_DEF_VFN_PER_PFN	0   /* Default VFs due to platform limitation */
 
 struct RTV_RSP {		/* Structure is in Big Endian format */
 	uint32_t ratov;
@@ -1199,7 +1203,9 @@ typedef struct {
 #define PCI_DEVICE_ID_BALIUS		0xe131
 #define PCI_DEVICE_ID_PROTEUS_PF	0xe180
 #define PCI_DEVICE_ID_LANCER_FC		0xe200
+#define PCI_DEVICE_ID_LANCER_FC_VF	0xe208
 #define PCI_DEVICE_ID_LANCER_FCOE	0xe260
+#define PCI_DEVICE_ID_LANCER_FCOE_VF	0xe268
 #define PCI_DEVICE_ID_SAT_SMB		0xf011
 #define PCI_DEVICE_ID_SAT_MID		0xf015
 #define PCI_DEVICE_ID_RFLY		0xf095
@@ -3021,7 +3027,7 @@ typedef struct {
 #define MAILBOX_EXT_SIZE	(MAILBOX_EXT_WSIZE * sizeof(uint32_t))
 #define MAILBOX_HBA_EXT_OFFSET	0x100
 /* max mbox xmit size is a page size for sysfs IO operations */
-#define MAILBOX_MAX_XMIT_SIZE	PAGE_SIZE
+#define MAILBOX_SYSFS_MAX	4096
 
 typedef union {
 	uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4dff668ebdad..11e26a26b5d1 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,6 +170,25 @@ struct lpfc_sli_intf {
 #define LPFC_PCI_FUNC3		3
 #define LPFC_PCI_FUNC4		4
 
+/* SLI4 interface type-2 control register offsets */
+#define LPFC_CTL_PORT_SEM_OFFSET	0x400
+#define LPFC_CTL_PORT_STA_OFFSET	0x404
+#define LPFC_CTL_PORT_CTL_OFFSET	0x408
+#define LPFC_CTL_PORT_ER1_OFFSET	0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET	0x410
+#define LPFC_CTL_PDEV_CTL_OFFSET	0x414
+
+/* Some SLI4 interface type-2 PDEV_CTL register bits */
+#define LPFC_CTL_PDEV_CTL_DRST		0x00000001
+#define LPFC_CTL_PDEV_CTL_FRST		0x00000002
+#define LPFC_CTL_PDEV_CTL_DD		0x00000004
+#define LPFC_CTL_PDEV_CTL_LC		0x00000008
+#define LPFC_CTL_PDEV_CTL_FRL_ALL	0x00
+#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE	0x10
+#define LPFC_CTL_PDEV_CTL_FRL_NIC	0x20
+
+#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
+
 /* Active interrupt test count */
 #define LPFC_ACT_INTR_CNT	4
 
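The PDEV_CTL definitions introduced above combine into LPFC_FW_DUMP_REQUEST (dump-device plus function-reset). As a hedged illustration of how such a request could be issued on an if_type-2 port — the register-base parameter is assumed here, and this exact sequence is not shown in this hunk:

#include <linux/io.h>

/* Hypothetical sketch: write the dump-request bits to the port's
 * physical-device control register and flush with a read-back. */
static void request_fw_dump(void __iomem *ctrl_regs)
{
	writel(LPFC_FW_DUMP_REQUEST, ctrl_regs + LPFC_CTL_PDEV_CTL_OFFSET);
	readl(ctrl_regs + LPFC_CTL_PDEV_CTL_OFFSET); /* posted-write flush */
}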
@@ -210,9 +229,26 @@ struct ulp_bde64 {
 
 struct lpfc_sli4_flags {
 	uint32_t word0;
-#define lpfc_fip_flag_SHIFT		0
-#define lpfc_fip_flag_MASK		0x00000001
-#define lpfc_fip_flag_WORD		word0
+#define lpfc_idx_rsrc_rdy_SHIFT		0
+#define lpfc_idx_rsrc_rdy_MASK		0x00000001
+#define lpfc_idx_rsrc_rdy_WORD		word0
+#define LPFC_IDX_RSRC_RDY		1
+#define lpfc_xri_rsrc_rdy_SHIFT		1
+#define lpfc_xri_rsrc_rdy_MASK		0x00000001
+#define lpfc_xri_rsrc_rdy_WORD		word0
+#define LPFC_XRI_RSRC_RDY		1
+#define lpfc_rpi_rsrc_rdy_SHIFT		2
+#define lpfc_rpi_rsrc_rdy_MASK		0x00000001
+#define lpfc_rpi_rsrc_rdy_WORD		word0
+#define LPFC_RPI_RSRC_RDY		1
+#define lpfc_vpi_rsrc_rdy_SHIFT		3
+#define lpfc_vpi_rsrc_rdy_MASK		0x00000001
+#define lpfc_vpi_rsrc_rdy_WORD		word0
+#define LPFC_VPI_RSRC_RDY		1
+#define lpfc_vfi_rsrc_rdy_SHIFT		4
+#define lpfc_vfi_rsrc_rdy_MASK		0x00000001
+#define lpfc_vfi_rsrc_rdy_WORD		word0
+#define LPFC_VFI_RSRC_RDY		1
 };
 
 struct sli4_bls_rsp {
@@ -739,6 +775,12 @@ union lpfc_sli4_cfg_shdr {
 #define lpfc_mbox_hdr_version_SHIFT	0
 #define lpfc_mbox_hdr_version_MASK	0x000000FF
 #define lpfc_mbox_hdr_version_WORD	word9
+#define lpfc_mbox_hdr_pf_num_SHIFT	16
+#define lpfc_mbox_hdr_pf_num_MASK	0x000000FF
+#define lpfc_mbox_hdr_pf_num_WORD	word9
+#define lpfc_mbox_hdr_vh_num_SHIFT	24
+#define lpfc_mbox_hdr_vh_num_MASK	0x000000FF
+#define lpfc_mbox_hdr_vh_num_WORD	word9
 #define LPFC_Q_CREATE_VERSION_2	2
 #define LPFC_Q_CREATE_VERSION_1	1
 #define LPFC_Q_CREATE_VERSION_0	0
@@ -766,12 +808,22 @@ union lpfc_sli4_cfg_shdr {
 	} response;
 };
 
-/* Mailbox structures */
+/* Mailbox Header structures.
+ * struct mbox_header is defined for first generation SLI4_CFG mailbox
+ * calls deployed for BE-based ports.
+ *
+ * struct sli4_mbox_header is defined for second generation SLI4
+ * ports that don't deploy the SLI4_CFG mechanism.
+ */
 struct mbox_header {
 	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
 	union lpfc_sli4_cfg_shdr cfg_shdr;
 };
 
+#define LPFC_EXTENT_LOCAL		0
+#define LPFC_TIMEOUT_DEFAULT		0
+#define LPFC_EXTENT_VERSION_DEFAULT	0
+
 /* Subsystem Definitions */
 #define LPFC_MBOX_SUBSYSTEM_COMMON	0x1
 #define LPFC_MBOX_SUBSYSTEM_FCOE	0xC
@@ -794,6 +846,13 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_QUERY_FW_CFG		0x3A
 #define LPFC_MBOX_OPCODE_FUNCTION_RESET		0x3D
 #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT		0x5A
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO	0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT	0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT	0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT	0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG	0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG	0xA4
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT		0xAC
 #define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS	0xB5
 
 /* FCoE Opcodes */
@@ -808,6 +867,8 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF		0x0A
 #define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE		0x0B
 #define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF		0x10
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE		0x22
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK	0x23
 
 /* Mailbox command structures */
 struct eq_context {
@@ -1210,6 +1271,187 @@ struct lpfc_mbx_mq_destroy {
 	} u;
 };
 
+/* Start Gen 2 SLI4 Mailbox definitions: */
+
+/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
+#define LPFC_RSC_TYPE_FCOE_VFI	0x20
+#define LPFC_RSC_TYPE_FCOE_VPI	0x21
+#define LPFC_RSC_TYPE_FCOE_RPI	0x22
+#define LPFC_RSC_TYPE_FCOE_XRI	0x23
+
+struct lpfc_mbx_get_rsrc_extent_info {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT	0
+#define lpfc_mbx_get_rsrc_extent_info_type_MASK		0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_type_WORD		word4
+		} req;
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT		0
+#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK		0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD		word4
+#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT	16
+#define lpfc_mbx_get_rsrc_extent_info_size_MASK		0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_size_WORD		word4
+		} rsp;
+	} u;
+};
+
+struct lpfc_id_range {
+	uint32_t word5;
+#define lpfc_mbx_rsrc_id_word4_0_SHIFT	0
+#define lpfc_mbx_rsrc_id_word4_0_MASK	0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_0_WORD	word5
+#define lpfc_mbx_rsrc_id_word4_1_SHIFT	16
+#define lpfc_mbx_rsrc_id_word4_1_MASK	0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_1_WORD	word5
+};
+
+struct lpfc_mbx_set_link_diag_state {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_set_diag_state_diag_SHIFT	0
+#define lpfc_mbx_set_diag_state_diag_MASK	0x00000001
+#define lpfc_mbx_set_diag_state_diag_WORD	word0
+#define lpfc_mbx_set_diag_state_link_num_SHIFT	16
+#define lpfc_mbx_set_diag_state_link_num_MASK	0x0000003F
+#define lpfc_mbx_set_diag_state_link_num_WORD	word0
+#define lpfc_mbx_set_diag_state_link_type_SHIFT	22
+#define lpfc_mbx_set_diag_state_link_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD	word0
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT	0
+#define lpfc_mbx_set_diag_lpbk_type_MASK	0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_WORD	word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE		0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL	0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL	0x2
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT	16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK	0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD	word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT	22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD	word0
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT	16
+#define lpfc_mbx_run_diag_test_link_num_MASK	0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD	word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT	22
+#define lpfc_mbx_run_diag_test_link_type_MASK	0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD	word0
+			uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT	0
+#define lpfc_mbx_run_diag_test_test_id_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD	word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT	16
+#define lpfc_mbx_run_diag_test_loops_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD	word1
+			uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT	0
+#define lpfc_mbx_run_diag_test_test_ver_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD	word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT	16
+#define lpfc_mbx_run_diag_test_err_act_MASK	0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD	word2
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * for extents payload.
+ *
+ * 212/2 (bytes per extent) = 106 extents.
+ * 106/2 (extents per word) = 53 words.
+ * lpfc_id_range id is statically sized to 53.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges. For ALLOC, the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT	0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK	0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD	word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT	16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK	0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD	word4
+		} req;
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT	0
+#define lpfc_mbx_rsrc_cnt_MASK	0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD	word4
+			struct lpfc_id_range id[53];
+		} rsp;
+	} u;
+};
+
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in
+ * this structure shares the SHIFT/MASK/WORD defines given for word4 in the
+ * embedded structures above. This non-embedded structure provides for the
+ * maximum number of extents supported by the port.
+ */
+struct lpfc_mbx_nembed_rsrc_extent {
+	union lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word4;
+	struct lpfc_id_range id;
+};
+
+struct lpfc_mbx_dealloc_rsrc_extents {
+	struct mbox_header header;
+	struct {
+		uint32_t word4;
+#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT	0
+#define lpfc_mbx_dealloc_rsrc_extents_type_MASK		0x0000FFFF
+#define lpfc_mbx_dealloc_rsrc_extents_type_WORD		word4
+	} req;
+
+};
+
+/* Start SLI4 FCoE specific mbox structures. */
+
 struct lpfc_mbx_post_hdr_tmpl {
 	struct mbox_header header;
 	uint32_t word10;
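The sizing comment inside lpfc_mbx_alloc_rsrc_extents above can be restated as compile-time arithmetic. The macro names below are hypothetical; the sketch merely re-derives the 53-entry bound on id[]:

#define SLI4_MBOX_BYTES		256		/* one mailbox */
#define SLI4_CFG_HDR_WORDS	(6 + 4 + 1)	/* 11 words = 44 bytes */
#define SLI4_EXT_PAYLOAD_BYTES	(SLI4_MBOX_BYTES - SLI4_CFG_HDR_WORDS * 4)
#define SLI4_EXT_IDS		(SLI4_EXT_PAYLOAD_BYTES / 2)	/* 212/2 = 106 */
#define SLI4_ID_RANGE_WORDS	(SLI4_EXT_IDS / 2)		/* 106/2 = 53 */

so struct lpfc_id_range id[53] fills the embedded payload exactly.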
@@ -1229,7 +1471,7 @@ struct sli4_sge { /* SLI-4 */
 
 	uint32_t word2;
 #define lpfc_sli4_sge_offset_SHIFT	0 /* Offset of buffer - Not used*/
-#define lpfc_sli4_sge_offset_MASK	0x00FFFFFF
+#define lpfc_sli4_sge_offset_MASK	0x1FFFFFFF
 #define lpfc_sli4_sge_offset_WORD	word2
 #define lpfc_sli4_sge_last_SHIFT	31 /* Last SEG in the SGL sets
 					this flag !! */
@@ -1773,61 +2015,31 @@ struct lpfc_mbx_read_rev {
 
 struct lpfc_mbx_read_config {
 	uint32_t word1;
-#define lpfc_mbx_rd_conf_max_bbc_SHIFT		0
-#define lpfc_mbx_rd_conf_max_bbc_MASK		0x000000FF
-#define lpfc_mbx_rd_conf_max_bbc_WORD		word1
-#define lpfc_mbx_rd_conf_init_bbc_SHIFT		8
-#define lpfc_mbx_rd_conf_init_bbc_MASK		0x000000FF
-#define lpfc_mbx_rd_conf_init_bbc_WORD		word1
+#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT	31
+#define lpfc_mbx_rd_conf_extnts_inuse_MASK	0x00000001
+#define lpfc_mbx_rd_conf_extnts_inuse_WORD	word1
 	uint32_t word2;
-#define lpfc_mbx_rd_conf_nport_did_SHIFT	0
-#define lpfc_mbx_rd_conf_nport_did_MASK		0x00FFFFFF
-#define lpfc_mbx_rd_conf_nport_did_WORD		word2
 #define lpfc_mbx_rd_conf_topology_SHIFT		24
 #define lpfc_mbx_rd_conf_topology_MASK		0x000000FF
 #define lpfc_mbx_rd_conf_topology_WORD		word2
-	uint32_t word3;
-#define lpfc_mbx_rd_conf_ao_SHIFT		0
-#define lpfc_mbx_rd_conf_ao_MASK		0x00000001
-#define lpfc_mbx_rd_conf_ao_WORD		word3
-#define lpfc_mbx_rd_conf_bb_scn_SHIFT		8
-#define lpfc_mbx_rd_conf_bb_scn_MASK		0x0000000F
-#define lpfc_mbx_rd_conf_bb_scn_WORD		word3
-#define lpfc_mbx_rd_conf_cbb_scn_SHIFT		12
-#define lpfc_mbx_rd_conf_cbb_scn_MASK		0x0000000F
-#define lpfc_mbx_rd_conf_cbb_scn_WORD		word3
-#define lpfc_mbx_rd_conf_mc_SHIFT		29
-#define lpfc_mbx_rd_conf_mc_MASK		0x00000001
-#define lpfc_mbx_rd_conf_mc_WORD		word3
+	uint32_t rsvd_3;
 	uint32_t word4;
 #define lpfc_mbx_rd_conf_e_d_tov_SHIFT		0
 #define lpfc_mbx_rd_conf_e_d_tov_MASK		0x0000FFFF
 #define lpfc_mbx_rd_conf_e_d_tov_WORD		word4
-	uint32_t word5;
-#define lpfc_mbx_rd_conf_lp_tov_SHIFT		0
-#define lpfc_mbx_rd_conf_lp_tov_MASK		0x0000FFFF
-#define lpfc_mbx_rd_conf_lp_tov_WORD		word5
+	uint32_t rsvd_5;
 	uint32_t word6;
 #define lpfc_mbx_rd_conf_r_a_tov_SHIFT		0
 #define lpfc_mbx_rd_conf_r_a_tov_MASK		0x0000FFFF
 #define lpfc_mbx_rd_conf_r_a_tov_WORD		word6
-	uint32_t word7;
-#define lpfc_mbx_rd_conf_r_t_tov_SHIFT		0
-#define lpfc_mbx_rd_conf_r_t_tov_MASK		0x000000FF
-#define lpfc_mbx_rd_conf_r_t_tov_WORD		word7
-	uint32_t word8;
-#define lpfc_mbx_rd_conf_al_tov_SHIFT		0
-#define lpfc_mbx_rd_conf_al_tov_MASK		0x0000000F
-#define lpfc_mbx_rd_conf_al_tov_WORD		word8
+	uint32_t rsvd_7;
+	uint32_t rsvd_8;
 	uint32_t word9;
 #define lpfc_mbx_rd_conf_lmt_SHIFT		0
 #define lpfc_mbx_rd_conf_lmt_MASK		0x0000FFFF
 #define lpfc_mbx_rd_conf_lmt_WORD		word9
-	uint32_t word10;
-#define lpfc_mbx_rd_conf_max_alpa_SHIFT		0
-#define lpfc_mbx_rd_conf_max_alpa_MASK		0x000000FF
-#define lpfc_mbx_rd_conf_max_alpa_WORD		word10
-	uint32_t word11_rsvd;
+	uint32_t rsvd_10;
+	uint32_t rsvd_11;
 	uint32_t word12;
 #define lpfc_mbx_rd_conf_xri_base_SHIFT		0
 #define lpfc_mbx_rd_conf_xri_base_MASK		0x0000FFFF
@@ -1857,9 +2069,6 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_vfi_count_MASK		0x0000FFFF
 #define lpfc_mbx_rd_conf_vfi_count_WORD		word15
 	uint32_t word16;
-#define lpfc_mbx_rd_conf_fcfi_base_SHIFT	0
-#define lpfc_mbx_rd_conf_fcfi_base_MASK		0x0000FFFF
-#define lpfc_mbx_rd_conf_fcfi_base_WORD		word16
 #define lpfc_mbx_rd_conf_fcfi_count_SHIFT	16
 #define lpfc_mbx_rd_conf_fcfi_count_MASK	0x0000FFFF
 #define lpfc_mbx_rd_conf_fcfi_count_WORD	word16
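The reworked READ_CONFIG layout above exposes extnts_inuse in bit 31 of word1; a port reporting it must obtain resource ids through the new 0x9A-0x9D extent opcodes rather than the removed base fields. A hypothetical helper (not in the patch) that open-codes the field extraction:

#include <stdbool.h>

/* Decide the provisioning mode from the reworked READ_CONFIG reply;
 * open-codes the shift/mask rather than relying on accessor macros. */
static bool port_uses_extents(const struct lpfc_mbx_read_config *rd_config)
{
	return (rd_config->word1 >> lpfc_mbx_rd_conf_extnts_inuse_SHIFT) &
		lpfc_mbx_rd_conf_extnts_inuse_MASK;
}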
@@ -2169,6 +2378,12 @@ struct lpfc_sli4_parameters {
 #define cfg_fcoe_SHIFT				0
 #define cfg_fcoe_MASK				0x00000001
 #define cfg_fcoe_WORD				word12
+#define cfg_ext_SHIFT				1
+#define cfg_ext_MASK				0x00000001
+#define cfg_ext_WORD				word12
+#define cfg_hdrr_SHIFT				2
+#define cfg_hdrr_MASK				0x00000001
+#define cfg_hdrr_WORD				word12
 #define cfg_phwq_SHIFT				15
 #define cfg_phwq_MASK				0x00000001
 #define cfg_phwq_WORD				word12
@@ -2198,6 +2413,145 @@ struct lpfc_mbx_get_sli4_parameters {
 	struct lpfc_sli4_parameters sli4_parameters;
 };
 
+struct lpfc_rscr_desc_generic {
+#define LPFC_RSRC_DESC_WSIZE	18
+	uint32_t desc[LPFC_RSRC_DESC_WSIZE];
+};
+
+struct lpfc_rsrc_desc_pcie {
+	uint32_t word0;
+#define lpfc_rsrc_desc_pcie_type_SHIFT		0
+#define lpfc_rsrc_desc_pcie_type_MASK		0x000000ff
+#define lpfc_rsrc_desc_pcie_type_WORD		word0
+#define LPFC_RSRC_DESC_TYPE_PCIE		0x40
+	uint32_t word1;
+#define lpfc_rsrc_desc_pcie_pfnum_SHIFT		0
+#define lpfc_rsrc_desc_pcie_pfnum_MASK		0x000000ff
+#define lpfc_rsrc_desc_pcie_pfnum_WORD		word1
+	uint32_t reserved;
+	uint32_t word3;
+#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT	0
+#define lpfc_rsrc_desc_pcie_sriov_sta_MASK	0x000000ff
+#define lpfc_rsrc_desc_pcie_sriov_sta_WORD	word3
+#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT	8
+#define lpfc_rsrc_desc_pcie_pf_sta_MASK		0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_sta_WORD		word3
+#define lpfc_rsrc_desc_pcie_pf_type_SHIFT	16
+#define lpfc_rsrc_desc_pcie_pf_type_MASK	0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_type_WORD	word3
+	uint32_t word4;
+#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT	0
+#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK	0x0000ffff
+#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD	word4
+};
+
+struct lpfc_rsrc_desc_fcfcoe {
+	uint32_t word0;
+#define lpfc_rsrc_desc_fcfcoe_type_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_type_MASK		0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_type_WORD		word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE		0x43
+	uint32_t word1;
+#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK	0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD	word1
+#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK	0x000007ff
+#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD	word1
+	uint32_t word2;
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD	word2
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD	word2
+	uint32_t word3;
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD	word3
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD	word3
+	uint32_t word4;
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD	word4
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD	word4
+	uint32_t word5;
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD	word5
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD	word5
+	uint32_t word6;
+	uint32_t word7;
+	uint32_t word8;
+	uint32_t word9;
+	uint32_t word10;
+	uint32_t word11;
+	uint32_t word12;
+	uint32_t word13;
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK	0x0000003f
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD	word13
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT	6
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK	0x00000003
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD	word13
+#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT		8
+#define lpfc_rsrc_desc_fcfcoe_lmc_MASK		0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lmc_WORD		word13
+#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT		9
+#define lpfc_rsrc_desc_fcfcoe_lld_MASK		0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lld_WORD		word13
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD	word13
+};
+
+struct lpfc_func_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM	2
+	uint32_t rsrc_desc_count;
+	struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_func_cfg {
+	struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE	0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT		0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE		0x2
+	struct lpfc_func_cfg func_cfg;
+};
+
+struct lpfc_prof_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM	2
+	uint32_t rsrc_desc_count;
+	struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_prof_cfg {
+	struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE	0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT		0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE		0x2
+	union {
+		struct {
+			uint32_t word10;
+#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT	0
+#define lpfc_mbx_get_prof_cfg_prof_id_MASK	0x000000ff
+#define lpfc_mbx_get_prof_cfg_prof_id_WORD	word10
+#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT	8
+#define lpfc_mbx_get_prof_cfg_prof_tp_MASK	0x00000003
+#define lpfc_mbx_get_prof_cfg_prof_tp_WORD	word10
+		} request;
+		struct {
+			struct lpfc_prof_cfg prof_cfg;
+		} response;
+	} u;
+};
+
 /* Mailbox Completion Queue Error Messages */
 #define MB_CQE_STATUS_SUCCESS			0x0
 #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES	0x1
@@ -2206,6 +2560,29 @@ struct lpfc_mbx_get_sli4_parameters {
 #define MB_CEQ_STATUS_QUEUE_FLUSHING		0x4
 #define MB_CQE_STATUS_DMA_FAILED		0x5
 
+#define LPFC_MBX_WR_CONFIG_MAX_BDE		8
+struct lpfc_mbx_wr_object {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_wr_object_eof_SHIFT		31
+#define lpfc_wr_object_eof_MASK			0x00000001
+#define lpfc_wr_object_eof_WORD			word4
+#define lpfc_wr_object_write_length_SHIFT	0
+#define lpfc_wr_object_write_length_MASK	0x00FFFFFF
+#define lpfc_wr_object_write_length_WORD	word4
+			uint32_t write_offset;
+			uint32_t object_name[26];
+			uint32_t bde_count;
+			struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
+		} request;
+		struct {
+			uint32_t actual_write_length;
+		} response;
+	} u;
+};
+
 /* mailbox queue entry structure */
 struct lpfc_mqe {
 	uint32_t word0;
@@ -2241,6 +2618,9 @@ struct lpfc_mqe {
 		struct lpfc_mbx_cq_destroy cq_destroy;
 		struct lpfc_mbx_wq_destroy wq_destroy;
 		struct lpfc_mbx_rq_destroy rq_destroy;
+		struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
+		struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
+		struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
 		struct lpfc_mbx_post_sgl_pages post_sgl_pages;
 		struct lpfc_mbx_nembed_cmd nembed_cmd;
 		struct lpfc_mbx_read_rev read_rev;
@@ -2252,7 +2632,13 @@ struct lpfc_mqe {
 		struct lpfc_mbx_supp_pages supp_pages;
 		struct lpfc_mbx_pc_sli4_params sli4_params;
 		struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+		struct lpfc_mbx_set_link_diag_state link_diag_state;
+		struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+		struct lpfc_mbx_run_link_diag_test link_diag_test;
+		struct lpfc_mbx_get_func_cfg get_func_cfg;
+		struct lpfc_mbx_get_prof_cfg get_prof_cfg;
 		struct lpfc_mbx_nop nop;
+		struct lpfc_mbx_wr_object wr_object;
 	} un;
 };
 
@@ -2458,7 +2844,7 @@ struct lpfc_bmbx_create {
 #define SGL_ALIGN_SZ 64
 #define SGL_PAGE_SIZE 4096
 /* align SGL addr on a size boundary - adjust address up */
-#define NO_XRI  ((uint16_t)-1)
+#define NO_XRI 0xffff
 
 struct wqe_common {
 	uint32_t word6;
@@ -2798,9 +3184,28 @@ union lpfc_wqe {
 	struct gen_req64_wqe gen_req;
 };
 
+#define LPFC_GROUP_OJECT_MAGIC_NUM		0xfeaa0001
+#define LPFC_FILE_TYPE_GROUP			0xf7
+#define LPFC_FILE_ID_GROUP			0xa2
+struct lpfc_grp_hdr {
+	uint32_t size;
+	uint32_t magic_number;
+	uint32_t word2;
+#define lpfc_grp_hdr_file_type_SHIFT	24
+#define lpfc_grp_hdr_file_type_MASK	0x000000FF
+#define lpfc_grp_hdr_file_type_WORD	word2
+#define lpfc_grp_hdr_id_SHIFT		16
+#define lpfc_grp_hdr_id_MASK		0x000000FF
+#define lpfc_grp_hdr_id_WORD		word2
+	uint8_t rev_name[128];
+};
+
 #define FCP_COMMAND 0x0
 #define FCP_COMMAND_DATA_OUT 0x1
 #define ELS_COMMAND_NON_FIP 0xC
 #define ELS_COMMAND_FIP 0xD
 #define OTHER_COMMAND 0x8
 
+#define LPFC_FW_DUMP	1
+#define LPFC_FW_RESET	2
+#define LPFC_DV_RESET	3
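Each word of the lpfc_id_range array added earlier in this file packs two 16-bit values, exposed through the lpfc_mbx_rsrc_id_word4_0/_1 fields of word5. A self-contained sketch of the unpacking (the helper name is hypothetical):

#include <stdint.h>

static void unpack_id_range(uint32_t word5, uint16_t *lo, uint16_t *hi)
{
	*lo = word5 & 0xFFFF;		/* ..._word4_0: bits 0..15  */
	*hi = (word5 >> 16) & 0xFFFF;	/* ..._word4_1: bits 16..31 */
}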
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dda036a1af3..148b98ddbb1d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -30,6 +30,7 @@
 #include <linux/ctype.h>
 #include <linux/aer.h>
 #include <linux/slab.h>
+#include <linux/firmware.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -211,7 +212,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
 	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
 	if (!lpfc_vpd_data)
 		goto out_free_mbox;
-
 	do {
 		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -309,6 +309,45 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 }
 
 /**
+ * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
+ *                         cfg_soft_wwnn, cfg_soft_wwpn
+ * @vport: pointer to lpfc vport data structure.
+ *
+ *
+ * Return codes
+ *   None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+	/* If the soft name exists then update it using the service params */
+	if (vport->phba->cfg_soft_wwnn)
+		u64_to_wwn(vport->phba->cfg_soft_wwnn,
+			   vport->fc_sparam.nodeName.u.wwn);
+	if (vport->phba->cfg_soft_wwpn)
+		u64_to_wwn(vport->phba->cfg_soft_wwpn,
+			   vport->fc_sparam.portName.u.wwn);
+
+	/*
+	 * If the name is empty or there exists a soft name
+	 * then copy the service params name, otherwise use the fc name
+	 */
+	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+			sizeof(struct lpfc_name));
+	else
+		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+			sizeof(struct lpfc_name));
+
+	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+			sizeof(struct lpfc_name));
+	else
+		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+			sizeof(struct lpfc_name));
+}
+
+/**
  * lpfc_config_port_post - Perform lpfc initialization after config port
  * @phba: pointer to lpfc hba data structure.
  *
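The new lpfc_update_vport_wwn() relies on u64_to_wwn() to expand the 64-bit soft-WWN module parameters into the 8-byte names held in the service parameters. For readers unfamiliar with that helper, the kernel's u64_to_wwn() behaves like the following sketch (shown for clarity, not part of the patch):

#include <stdint.h>

static void u64_to_wwn_sketch(uint64_t inm, uint8_t wwn[8])
{
	int i;

	for (i = 0; i < 8; i++)		/* most-significant byte first */
		wwn[i] = (inm >> (56 - 8 * i)) & 0xff;
}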
@@ -377,17 +416,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	pmb->context1 = NULL;
-
-	if (phba->cfg_soft_wwnn)
-		u64_to_wwn(phba->cfg_soft_wwnn,
-			   vport->fc_sparam.nodeName.u.wwn);
-	if (phba->cfg_soft_wwpn)
-		u64_to_wwn(phba->cfg_soft_wwpn,
-			   vport->fc_sparam.portName.u.wwn);
-	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
-	       sizeof (struct lpfc_name));
-	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
-	       sizeof (struct lpfc_name));
+	lpfc_update_vport_wwn(vport);
 
 	/* Update the fc_host data structures with new wwn. */
 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
@@ -573,7 +602,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 		/* Clear all pending interrupts */
 		writel(0xffffffff, phba->HAregaddr);
 		readl(phba->HAregaddr); /* flush */
-
 		phba->link_state = LPFC_HBA_ERROR;
 		if (rc != MBX_BUSY)
 			mempool_free(pmb, phba->mbox_mem_pool);
@@ -1755,7 +1783,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	    && descp && descp[0] != '\0')
 		return;
 
-	if (phba->lmt & LMT_10Gb)
+	if (phba->lmt & LMT_16Gb)
+		max_speed = 16;
+	else if (phba->lmt & LMT_10Gb)
 		max_speed = 10;
 	else if (phba->lmt & LMT_8Gb)
 		max_speed = 8;
@@ -1922,12 +1952,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1922 "Fibre Channel Adapter"}; 1952 "Fibre Channel Adapter"};
1923 break; 1953 break;
1924 case PCI_DEVICE_ID_LANCER_FC: 1954 case PCI_DEVICE_ID_LANCER_FC:
1925 oneConnect = 1; 1955 case PCI_DEVICE_ID_LANCER_FC_VF:
1926 m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"}; 1956 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
1927 break; 1957 break;
1928 case PCI_DEVICE_ID_LANCER_FCOE: 1958 case PCI_DEVICE_ID_LANCER_FCOE:
1959 case PCI_DEVICE_ID_LANCER_FCOE_VF:
1929 oneConnect = 1; 1960 oneConnect = 1;
1930 m = (typeof(m)){"Undefined", "PCIe", "FCoE"}; 1961 m = (typeof(m)){"OCe50100", "PCIe", "FCoE"};
1931 break; 1962 break;
1932 default: 1963 default:
1933 m = (typeof(m)){"Unknown", "", ""}; 1964 m = (typeof(m)){"Unknown", "", ""};
@@ -1936,7 +1967,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 
 	if (mdp && mdp[0] == '\0')
 		snprintf(mdp, 79,"%s", m.name);
-	/* oneConnect hba requires special processing, they are all initiators
+	/*
+	 * oneConnect hba requires special processing, they are all initiators
 	 * and we put the port number on the end
 	 */
 	if (descp && descp[0] == '\0') {
@@ -2656,6 +2688,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 		kfree(io);
 		phba->total_iocbq_bufs--;
 	}
+
 	spin_unlock_irq(&phba->hbalock);
 	return 0;
 }
@@ -3612,6 +3645,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2718 Clear Virtual Link Received for VPI 0x%x"
 			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
 		vport = lpfc_find_vport_by_vpid(phba,
 				acqe_fip->index - phba->vpi_base);
 		ndlp = lpfc_sli4_perform_vport_cvl(vport);
@@ -3935,6 +3969,10 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
 	pci_try_set_mwi(pdev);
 	pci_save_state(pdev);
 
+	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+		pdev->needs_freset = 1;
+
 	return 0;
 
 out_disable_device:
@@ -3997,6 +4035,36 @@ lpfc_reset_hba(struct lpfc_hba *phba)
3997} 4035}
3998 4036
3999/** 4037/**
4038 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4039 * @phba: pointer to lpfc hba data structure.
4040 * @nr_vfn: number of virtual functions to be enabled.
4041 *
4042 * This function enables the PCI SR-IOV virtual functions to a physical
4043 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 4044 * enable the requested number of virtual functions on the physical
 4045 * function. As not all devices support SR-IOV, a failure returned by the
 4046 * pci_enable_sriov() API call is not treated as an error for most devices.
4047 **/
4048int
4049lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4050{
4051 struct pci_dev *pdev = phba->pcidev;
4052 int rc;
4053
4054 rc = pci_enable_sriov(pdev, nr_vfn);
4055 if (rc) {
4056 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4057 "2806 Failed to enable sriov on this device "
4058 "with vfn number nr_vf:%d, rc:%d\n",
4059 nr_vfn, rc);
4060 } else
4061 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 4062 "2807 Successfully enabled sriov on this device "
4063 "with vfn number nr_vf:%d\n", nr_vfn);
4064 return rc;
4065}
4066
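
For reference, pci_enable_sriov() and pci_disable_sriov() are the only PCI-core
entry points involved in the new SR-IOV support. A minimal sketch of the same
enable-on-probe, disable-on-remove pattern for a generic PCI driver follows;
the example_* names are illustrative, not lpfc symbols:

#include <linux/pci.h>

/* Try to enable nr_vfn virtual functions; treat failure as non-fatal,
 * mirroring how lpfc logs and continues when SR-IOV is unsupported. */
static int example_enable_vfs(struct pci_dev *pdev, int nr_vfn)
{
	int rc;

	if (nr_vfn <= 0)
		return 0;

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc)
		dev_warn(&pdev->dev, "SR-IOV enable failed: %d\n", rc);
	return rc;
}

/* On remove, tear the VFs down before releasing the physical function. */
static void example_disable_vfs(struct pci_dev *pdev)
{
	pci_disable_sriov(pdev);
}
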
4067/**
4000 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 4068 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4001 * @phba: pointer to lpfc hba data structure. 4069 * @phba: pointer to lpfc hba data structure.
4002 * 4070 *
@@ -4011,6 +4079,7 @@ static int
4011lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 4079lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4012{ 4080{
4013 struct lpfc_sli *psli; 4081 struct lpfc_sli *psli;
4082 int rc;
4014 4083
4015 /* 4084 /*
4016 * Initialize timers used by driver 4085 * Initialize timers used by driver
@@ -4085,6 +4154,23 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4085 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 4154 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4086 return -ENOMEM; 4155 return -ENOMEM;
4087 4156
4157 /*
4158 * Enable sr-iov virtual functions if supported and configured
4159 * through the module parameter.
4160 */
4161 if (phba->cfg_sriov_nr_virtfn > 0) {
4162 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4163 phba->cfg_sriov_nr_virtfn);
4164 if (rc) {
4165 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4166 "2808 Requested number of SR-IOV "
4167 "virtual functions (%d) is not "
4168 "supported\n",
4169 phba->cfg_sriov_nr_virtfn);
4170 phba->cfg_sriov_nr_virtfn = 0;
4171 }
4172 }
4173
4088 return 0; 4174 return 0;
4089} 4175}
4090 4176
@@ -4161,6 +4247,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4161 phba->fcf.redisc_wait.data = (unsigned long)phba; 4247 phba->fcf.redisc_wait.data = (unsigned long)phba;
4162 4248
4163 /* 4249 /*
4250 * Control structure for handling external multi-buffer mailbox
4251 * command pass-through.
4252 */
4253 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4254 sizeof(struct lpfc_mbox_ext_buf_ctx));
4255 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4256
4257 /*
4164 * We need to do a READ_CONFIG mailbox command here before 4258 * We need to do a READ_CONFIG mailbox command here before
4165 * calling lpfc_get_cfgparam. For VFs this will report the 4259 * calling lpfc_get_cfgparam. For VFs this will report the
4166 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 4260 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
@@ -4233,7 +4327,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4233 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 4327 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4234 4328
4235 /* 4329 /*
4236 * Initialize dirver internal slow-path work queues 4330 * Initialize driver internal slow-path work queues
4237 */ 4331 */
4238 4332
 4239 /* Driver internal slow-path CQ Event pool */ 4333 /* Driver internal slow-path CQ Event pool */
@@ -4249,6 +4343,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4249 /* Receive queue CQ Event work queue list */ 4343 /* Receive queue CQ Event work queue list */
4250 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 4344 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4251 4345
4346 /* Initialize extent block lists. */
4347 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
4348 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
4349 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
4350 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
4351
4252 /* Initialize the driver internal SLI layer lists. */ 4352 /* Initialize the driver internal SLI layer lists. */
4253 lpfc_sli_setup(phba); 4353 lpfc_sli_setup(phba);
4254 lpfc_sli_queue_setup(phba); 4354 lpfc_sli_queue_setup(phba);
@@ -4323,9 +4423,19 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4323 } 4423 }
4324 /* 4424 /*
4325 * Get sli4 parameters that override parameters from Port capabilities. 4425 * Get sli4 parameters that override parameters from Port capabilities.
4326 * If this call fails it is not a critical error so continue loading. 4426 * If this call fails, it isn't critical unless the SLI4 parameters come
4427 * back in conflict.
4327 */ 4428 */
4328 lpfc_get_sli4_parameters(phba, mboxq); 4429 rc = lpfc_get_sli4_parameters(phba, mboxq);
4430 if (rc) {
4431 if (phba->sli4_hba.extents_in_use &&
4432 phba->sli4_hba.rpi_hdrs_in_use) {
4433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 4434 "2999 Unsupported SLI4 Parameters: "
4435 "Extents and RPI headers enabled.\n");
4436 goto out_free_bsmbx;
4437 }
4438 }
4329 mempool_free(mboxq, phba->mbox_mem_pool); 4439 mempool_free(mboxq, phba->mbox_mem_pool);
4330 /* Create all the SLI4 queues */ 4440 /* Create all the SLI4 queues */
4331 rc = lpfc_sli4_queue_create(phba); 4441 rc = lpfc_sli4_queue_create(phba);
@@ -4350,7 +4460,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4350 "1430 Failed to initialize sgl list.\n"); 4460 "1430 Failed to initialize sgl list.\n");
4351 goto out_free_sgl_list; 4461 goto out_free_sgl_list;
4352 } 4462 }
4353
4354 rc = lpfc_sli4_init_rpi_hdrs(phba); 4463 rc = lpfc_sli4_init_rpi_hdrs(phba);
4355 if (rc) { 4464 if (rc) {
4356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4366,6 +4475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4367 "2759 Failed allocate memory for FCF round " 4476 "2759 Failed allocate memory for FCF round "
4368 "robin failover bmask\n"); 4477 "robin failover bmask\n");
4478 rc = -ENOMEM;
4369 goto out_remove_rpi_hdrs; 4479 goto out_remove_rpi_hdrs;
4370 } 4480 }
4371 4481
@@ -4375,6 +4485,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4376 "2572 Failed allocate memory for fast-path " 4486 "2572 Failed allocate memory for fast-path "
4377 "per-EQ handle array\n"); 4487 "per-EQ handle array\n");
4488 rc = -ENOMEM;
4378 goto out_free_fcf_rr_bmask; 4489 goto out_free_fcf_rr_bmask;
4379 } 4490 }
4380 4491
@@ -4384,9 +4495,27 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4385 "2573 Failed allocate memory for msi-x " 4496 "2573 Failed allocate memory for msi-x "
4386 "interrupt vector entries\n"); 4497 "interrupt vector entries\n");
4498 rc = -ENOMEM;
4387 goto out_free_fcp_eq_hdl; 4499 goto out_free_fcp_eq_hdl;
4388 } 4500 }
4389 4501
4502 /*
4503 * Enable sr-iov virtual functions if supported and configured
4504 * through the module parameter.
4505 */
4506 if (phba->cfg_sriov_nr_virtfn > 0) {
4507 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
4508 phba->cfg_sriov_nr_virtfn);
4509 if (rc) {
4510 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4511 "3020 Requested number of SR-IOV "
4512 "virtual functions (%d) is not "
4513 "supported\n",
4514 phba->cfg_sriov_nr_virtfn);
4515 phba->cfg_sriov_nr_virtfn = 0;
4516 }
4517 }
4518
4390 return rc; 4519 return rc;
4391 4520
4392out_free_fcp_eq_hdl: 4521out_free_fcp_eq_hdl:
@@ -4449,6 +4578,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4449 lpfc_sli4_cq_event_release_all(phba); 4578 lpfc_sli4_cq_event_release_all(phba);
4450 lpfc_sli4_cq_event_pool_destroy(phba); 4579 lpfc_sli4_cq_event_pool_destroy(phba);
4451 4580
4581 /* Release resource identifiers. */
4582 lpfc_sli4_dealloc_resource_identifiers(phba);
4583
4452 /* Free the bsmbx region. */ 4584 /* Free the bsmbx region. */
4453 lpfc_destroy_bootstrap_mbox(phba); 4585 lpfc_destroy_bootstrap_mbox(phba);
4454 4586
@@ -4649,6 +4781,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4649 "Unloading driver.\n", __func__); 4781 "Unloading driver.\n", __func__);
4650 goto out_free_iocbq; 4782 goto out_free_iocbq;
4651 } 4783 }
4784 iocbq_entry->sli4_lxritag = NO_XRI;
4652 iocbq_entry->sli4_xritag = NO_XRI; 4785 iocbq_entry->sli4_xritag = NO_XRI;
4653 4786
4654 spin_lock_irq(&phba->hbalock); 4787 spin_lock_irq(&phba->hbalock);
@@ -4746,7 +4879,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4746 4879
4747 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4880 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4748 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4881 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4749 "2400 lpfc_init_sgl_list els %d.\n", 4882 "2400 ELS XRI count %d.\n",
4750 els_xri_cnt); 4883 els_xri_cnt);
4751 /* Initialize and populate the sglq list per host/VF. */ 4884 /* Initialize and populate the sglq list per host/VF. */
4752 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 4885 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
@@ -4779,7 +4912,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4779 phba->sli4_hba.scsi_xri_max = 4912 phba->sli4_hba.scsi_xri_max =
4780 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4913 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4781 phba->sli4_hba.scsi_xri_cnt = 0; 4914 phba->sli4_hba.scsi_xri_cnt = 0;
4782
4783 phba->sli4_hba.lpfc_scsi_psb_array = 4915 phba->sli4_hba.lpfc_scsi_psb_array =
4784 kzalloc((sizeof(struct lpfc_scsi_buf *) * 4916 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4785 phba->sli4_hba.scsi_xri_max), GFP_KERNEL); 4917 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
@@ -4802,13 +4934,6 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4802 goto out_free_mem; 4934 goto out_free_mem;
4803 } 4935 }
4804 4936
4805 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4806 if (sglq_entry->sli4_xritag == NO_XRI) {
4807 kfree(sglq_entry);
4808 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4809 "Unloading driver.\n", __func__);
4810 goto out_free_mem;
4811 }
4812 sglq_entry->buff_type = GEN_BUFF_TYPE; 4937 sglq_entry->buff_type = GEN_BUFF_TYPE;
4813 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); 4938 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4814 if (sglq_entry->virt == NULL) { 4939 if (sglq_entry->virt == NULL) {
@@ -4857,24 +4982,20 @@ int
4857lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 4982lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4858{ 4983{
4859 int rc = 0; 4984 int rc = 0;
4860 int longs;
4861 uint16_t rpi_count;
4862 struct lpfc_rpi_hdr *rpi_hdr; 4985 struct lpfc_rpi_hdr *rpi_hdr;
4863 4986
4864 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 4987 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4865
4866 /* 4988 /*
4867 * Provision an rpi bitmask range for discovery. The total count 4989 * If the SLI4 port supports extents, posting the rpi header isn't
4868 * is the difference between max and base + 1. 4990 * required. Set the expected maximum count and let the actual value
4991 * get set when extents are fully allocated.
4869 */ 4992 */
4870 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + 4993 if (!phba->sli4_hba.rpi_hdrs_in_use) {
4871 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4994 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
4872 4995 return rc;
4873 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; 4996 }
4874 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), 4997 if (phba->sli4_hba.extents_in_use)
4875 GFP_KERNEL); 4998 return -EIO;
4876 if (!phba->sli4_hba.rpi_bmask)
4877 return -ENOMEM;
4878 4999
4879 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 5000 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4880 if (!rpi_hdr) { 5001 if (!rpi_hdr) {
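
The gating logic in this hunk (skip header posting when RPI headers are unused,
reject headers combined with extents) can be modeled in isolation. A userspace
sketch under those assumptions; all names here are stand-ins:

#include <stdio.h>
#include <stdbool.h>

#define EIO 5

/* Toy model of the gating above: with no RPI headers in use the port
 * owns rpi provisioning, and headers plus extents together is invalid. */
static int init_rpi_hdrs(bool hdrs_in_use, bool extents_in_use,
			 int max_rpi, int *next_rpi)
{
	if (!hdrs_in_use) {
		*next_rpi = max_rpi;	/* expected max; port sets the rest */
		return 0;
	}
	if (extents_in_use)
		return -EIO;		/* contradictory configuration */
	return 0;			/* caller posts the first header */
}

int main(void)
{
	int next_rpi = 0;

	printf("no hdrs: rc=%d next_rpi=%d\n",
	       init_rpi_hdrs(false, false, 64, &next_rpi), next_rpi);
	printf("hdrs+extents: rc=%d\n",
	       init_rpi_hdrs(true, true, 64, &next_rpi));
	return 0;
}
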
@@ -4908,11 +5029,28 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4908 struct lpfc_rpi_hdr *rpi_hdr; 5029 struct lpfc_rpi_hdr *rpi_hdr;
4909 uint32_t rpi_count; 5030 uint32_t rpi_count;
4910 5031
5032 /*
5033 * If the SLI4 port supports extents, posting the rpi header isn't
5034 * required. Set the expected maximum count and let the actual value
5035 * get set when extents are fully allocated.
5036 */
5037 if (!phba->sli4_hba.rpi_hdrs_in_use)
5038 return NULL;
5039 if (phba->sli4_hba.extents_in_use)
5040 return NULL;
5041
5042 /* The limit on the logical index is just the max_rpi count. */
4911 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 5043 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4912 phba->sli4_hba.max_cfg_param.max_rpi - 1; 5044 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4913 5045
4914 spin_lock_irq(&phba->hbalock); 5046 spin_lock_irq(&phba->hbalock);
4915 curr_rpi_range = phba->sli4_hba.next_rpi; 5047 /*
5048 * Establish the starting RPI in this header block. The starting
5049 * rpi is normalized to a zero base because the physical rpi is
5050 * port based.
5051 */
5052 curr_rpi_range = phba->sli4_hba.next_rpi -
5053 phba->sli4_hba.max_cfg_param.rpi_base;
4916 spin_unlock_irq(&phba->hbalock); 5054 spin_unlock_irq(&phba->hbalock);
4917 5055
4918 /* 5056 /*
@@ -4925,6 +5063,8 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4925 else 5063 else
4926 rpi_count = LPFC_RPI_HDR_COUNT; 5064 rpi_count = LPFC_RPI_HDR_COUNT;
4927 5065
5066 if (!rpi_count)
5067 return NULL;
4928 /* 5068 /*
4929 * First allocate the protocol header region for the port. The 5069 * First allocate the protocol header region for the port. The
4930 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 5070 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -4957,12 +5097,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4957 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 5097 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4958 rpi_hdr->page_count = 1; 5098 rpi_hdr->page_count = 1;
4959 spin_lock_irq(&phba->hbalock); 5099 spin_lock_irq(&phba->hbalock);
4960 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 5100
5101 /* The rpi_hdr stores the logical index only. */
5102 rpi_hdr->start_rpi = curr_rpi_range;
4961 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 5103 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4962 5104
4963 /* 5105 /*
4964 * The next_rpi stores the next module-64 rpi value to post 5106 * The next_rpi stores the next logical module-64 rpi value used
4965 * in any subsequent rpi memory region postings. 5107 * to post physical rpis in subsequent rpi postings.
4966 */ 5108 */
4967 phba->sli4_hba.next_rpi += rpi_count; 5109 phba->sli4_hba.next_rpi += rpi_count;
4968 spin_unlock_irq(&phba->hbalock); 5110 spin_unlock_irq(&phba->hbalock);
@@ -4981,15 +5123,18 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4981 * @phba: pointer to lpfc hba data structure. 5123 * @phba: pointer to lpfc hba data structure.
4982 * 5124 *
4983 * This routine is invoked to remove all memory resources allocated 5125 * This routine is invoked to remove all memory resources allocated
4984 * to support rpis. This routine presumes the caller has released all 5126 * to support rpis for SLI4 ports not supporting extents. This routine
4985 * rpis consumed by fabric or port logins and is prepared to have 5127 * presumes the caller has released all rpis consumed by fabric or port
4986 * the header pages removed. 5128 * logins and is prepared to have the header pages removed.
4987 **/ 5129 **/
4988void 5130void
4989lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 5131lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4990{ 5132{
4991 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 5133 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4992 5134
5135 if (!phba->sli4_hba.rpi_hdrs_in_use)
5136 goto exit;
5137
4993 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 5138 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4994 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 5139 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4995 list_del(&rpi_hdr->list); 5140 list_del(&rpi_hdr->list);
@@ -4998,9 +5143,9 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4998 kfree(rpi_hdr->dmabuf); 5143 kfree(rpi_hdr->dmabuf);
4999 kfree(rpi_hdr); 5144 kfree(rpi_hdr);
5000 } 5145 }
5001 5146 exit:
5002 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5147 /* There are no rpis available to the port now. */
5003 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 5148 phba->sli4_hba.next_rpi = 0;
5004} 5149}
5005 5150
5006/** 5151/**
@@ -5487,7 +5632,8 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5487 /* Final checks. The port status should be clean. */ 5632 /* Final checks. The port status should be clean. */
5488 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 5633 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5489 &reg_data.word0) || 5634 &reg_data.word0) ||
5490 bf_get(lpfc_sliport_status_err, &reg_data)) { 5635 (bf_get(lpfc_sliport_status_err, &reg_data) &&
5636 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
5491 phba->work_status[0] = 5637 phba->work_status[0] =
5492 readl(phba->sli4_hba.u.if_type2. 5638 readl(phba->sli4_hba.u.if_type2.
5493 ERR1regaddr); 5639 ERR1regaddr);
@@ -5741,7 +5887,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5741{ 5887{
5742 LPFC_MBOXQ_t *pmb; 5888 LPFC_MBOXQ_t *pmb;
5743 struct lpfc_mbx_read_config *rd_config; 5889 struct lpfc_mbx_read_config *rd_config;
5744 uint32_t rc = 0; 5890 union lpfc_sli4_cfg_shdr *shdr;
5891 uint32_t shdr_status, shdr_add_status;
5892 struct lpfc_mbx_get_func_cfg *get_func_cfg;
5893 struct lpfc_rsrc_desc_fcfcoe *desc;
5894 uint32_t desc_count;
5895 int length, i, rc = 0;
5745 5896
5746 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5897 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5747 if (!pmb) { 5898 if (!pmb) {
@@ -5763,6 +5914,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5763 rc = -EIO; 5914 rc = -EIO;
5764 } else { 5915 } else {
5765 rd_config = &pmb->u.mqe.un.rd_config; 5916 rd_config = &pmb->u.mqe.un.rd_config;
5917 phba->sli4_hba.extents_in_use =
5918 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
5766 phba->sli4_hba.max_cfg_param.max_xri = 5919 phba->sli4_hba.max_cfg_param.max_xri =
5767 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5920 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5768 phba->sli4_hba.max_cfg_param.xri_base = 5921 phba->sli4_hba.max_cfg_param.xri_base =
@@ -5781,8 +5934,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5781 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5934 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5782 phba->sli4_hba.max_cfg_param.max_fcfi = 5935 phba->sli4_hba.max_cfg_param.max_fcfi =
5783 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5936 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5784 phba->sli4_hba.max_cfg_param.fcfi_base =
5785 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5786 phba->sli4_hba.max_cfg_param.max_eq = 5937 phba->sli4_hba.max_cfg_param.max_eq =
5787 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5938 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5788 phba->sli4_hba.max_cfg_param.max_rq = 5939 phba->sli4_hba.max_cfg_param.max_rq =
@@ -5800,11 +5951,13 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5800 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 5951 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5801 phba->max_vports = phba->max_vpi; 5952 phba->max_vports = phba->max_vpi;
5802 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5953 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5803 "2003 cfg params XRI(B:%d M:%d), " 5954 "2003 cfg params Extents? %d "
5955 "XRI(B:%d M:%d), "
5804 "VPI(B:%d M:%d) " 5956 "VPI(B:%d M:%d) "
5805 "VFI(B:%d M:%d) " 5957 "VFI(B:%d M:%d) "
5806 "RPI(B:%d M:%d) " 5958 "RPI(B:%d M:%d) "
5807 "FCFI(B:%d M:%d)\n", 5959 "FCFI(Count:%d)\n",
5960 phba->sli4_hba.extents_in_use,
5808 phba->sli4_hba.max_cfg_param.xri_base, 5961 phba->sli4_hba.max_cfg_param.xri_base,
5809 phba->sli4_hba.max_cfg_param.max_xri, 5962 phba->sli4_hba.max_cfg_param.max_xri,
5810 phba->sli4_hba.max_cfg_param.vpi_base, 5963 phba->sli4_hba.max_cfg_param.vpi_base,
@@ -5813,10 +5966,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5813 phba->sli4_hba.max_cfg_param.max_vfi, 5966 phba->sli4_hba.max_cfg_param.max_vfi,
5814 phba->sli4_hba.max_cfg_param.rpi_base, 5967 phba->sli4_hba.max_cfg_param.rpi_base,
5815 phba->sli4_hba.max_cfg_param.max_rpi, 5968 phba->sli4_hba.max_cfg_param.max_rpi,
5816 phba->sli4_hba.max_cfg_param.fcfi_base,
5817 phba->sli4_hba.max_cfg_param.max_fcfi); 5969 phba->sli4_hba.max_cfg_param.max_fcfi);
5818 } 5970 }
5819 mempool_free(pmb, phba->mbox_mem_pool); 5971
5972 if (rc)
5973 goto read_cfg_out;
5820 5974
5821 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 5975 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5822 if (phba->cfg_hba_queue_depth > 5976 if (phba->cfg_hba_queue_depth >
@@ -5825,6 +5979,65 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
5825 phba->cfg_hba_queue_depth = 5979 phba->cfg_hba_queue_depth =
5826 phba->sli4_hba.max_cfg_param.max_xri - 5980 phba->sli4_hba.max_cfg_param.max_xri -
5827 lpfc_sli4_get_els_iocb_cnt(phba); 5981 lpfc_sli4_get_els_iocb_cnt(phba);
5982
5983 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
5984 LPFC_SLI_INTF_IF_TYPE_2)
5985 goto read_cfg_out;
5986
5987 /* get the pf# and vf# for SLI4 if_type 2 port */
5988 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
5989 sizeof(struct lpfc_sli4_cfg_mhdr));
5990 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
5991 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
5992 length, LPFC_SLI4_MBX_EMBED);
5993
5994 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5995 shdr = (union lpfc_sli4_cfg_shdr *)
5996 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
5997 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5998 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5999 if (rc || shdr_status || shdr_add_status) {
6000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 6001 "3026 Mailbox failed, mbxCmd x%x "
6002 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6003 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6004 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6005 rc = -EIO;
6006 goto read_cfg_out;
6007 }
6008
 6009 /* search for fc_fcoe resource descriptor */
6010 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6011 desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6012
6013 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6014 desc = (struct lpfc_rsrc_desc_fcfcoe *)
6015 &get_func_cfg->func_cfg.desc[i];
6016 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6017 bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
6018 phba->sli4_hba.iov.pf_number =
6019 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6020 phba->sli4_hba.iov.vf_number =
6021 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6022 break;
6023 }
6024 }
6025
6026 if (i < LPFC_RSRC_DESC_MAX_NUM)
6027 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6028 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6029 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6030 phba->sli4_hba.iov.vf_number);
6031 else {
6032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6033 "3028 GET_FUNCTION_CONFIG: failed to find "
 6034 "Resource Descriptor:x%x\n",
6035 LPFC_RSRC_DESC_TYPE_FCFCOE);
6036 rc = -EIO;
6037 }
6038
6039read_cfg_out:
6040 mempool_free(pmb, phba->mbox_mem_pool);
5828 return rc; 6041 return rc;
5829} 6042}
5830 6043
@@ -6229,8 +6442,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6229 phba->sli4_hba.mbx_cq = NULL; 6442 phba->sli4_hba.mbx_cq = NULL;
6230 6443
6231 /* Release FCP response complete queue */ 6444 /* Release FCP response complete queue */
6232 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6445 fcp_qidx = 0;
6446 do
6233 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6447 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6448 while (++fcp_qidx < phba->cfg_fcp_eq_count);
6234 kfree(phba->sli4_hba.fcp_cq); 6449 kfree(phba->sli4_hba.fcp_cq);
6235 phba->sli4_hba.fcp_cq = NULL; 6450 phba->sli4_hba.fcp_cq = NULL;
6236 6451
@@ -6353,16 +6568,24 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6353 phba->sli4_hba.sp_eq->queue_id); 6568 phba->sli4_hba.sp_eq->queue_id);
6354 6569
6355 /* Set up fast-path FCP Response Complete Queue */ 6570 /* Set up fast-path FCP Response Complete Queue */
6356 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6571 fcp_cqidx = 0;
6572 do {
6357 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6573 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6359 "0526 Fast-path FCP CQ (%d) not " 6575 "0526 Fast-path FCP CQ (%d) not "
6360 "allocated\n", fcp_cqidx); 6576 "allocated\n", fcp_cqidx);
6361 goto out_destroy_fcp_cq; 6577 goto out_destroy_fcp_cq;
6362 } 6578 }
6363 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6579 if (phba->cfg_fcp_eq_count)
6364 phba->sli4_hba.fp_eq[fcp_cqidx], 6580 rc = lpfc_cq_create(phba,
6365 LPFC_WCQ, LPFC_FCP); 6581 phba->sli4_hba.fcp_cq[fcp_cqidx],
6582 phba->sli4_hba.fp_eq[fcp_cqidx],
6583 LPFC_WCQ, LPFC_FCP);
6584 else
6585 rc = lpfc_cq_create(phba,
6586 phba->sli4_hba.fcp_cq[fcp_cqidx],
6587 phba->sli4_hba.sp_eq,
6588 LPFC_WCQ, LPFC_FCP);
6366 if (rc) { 6589 if (rc) {
6367 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6590 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6368 "0527 Failed setup of fast-path FCP " 6591 "0527 Failed setup of fast-path FCP "
@@ -6371,12 +6594,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6371 } 6594 }
6372 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6595 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6373 "2588 FCP CQ setup: cq[%d]-id=%d, " 6596 "2588 FCP CQ setup: cq[%d]-id=%d, "
6374 "parent eq[%d]-id=%d\n", 6597 "parent %seq[%d]-id=%d\n",
6375 fcp_cqidx, 6598 fcp_cqidx,
6376 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6599 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6600 (phba->cfg_fcp_eq_count) ? "" : "sp_",
6377 fcp_cqidx, 6601 fcp_cqidx,
6378 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 6602 (phba->cfg_fcp_eq_count) ?
6379 } 6603 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
6604 phba->sli4_hba.sp_eq->queue_id);
6605 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6380 6606
6381 /* 6607 /*
6382 * Set up all the Work Queues (WQs) 6608 * Set up all the Work Queues (WQs)
@@ -6445,7 +6671,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6445 fcp_cq_index, 6671 fcp_cq_index,
6446 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6672 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6447 /* Round robin FCP Work Queue's Completion Queue assignment */ 6673 /* Round robin FCP Work Queue's Completion Queue assignment */
6448 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 6674 if (phba->cfg_fcp_eq_count)
6675 fcp_cq_index = ((fcp_cq_index + 1) %
6676 phba->cfg_fcp_eq_count);
6449 } 6677 }
6450 6678
6451 /* 6679 /*
@@ -6827,6 +7055,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
6827 if (rdy_chk < 1000) 7055 if (rdy_chk < 1000)
6828 break; 7056 break;
6829 } 7057 }
7058 /* delay driver action following IF_TYPE_2 function reset */
7059 msleep(100);
6830 break; 7060 break;
6831 case LPFC_SLI_INTF_IF_TYPE_1: 7061 case LPFC_SLI_INTF_IF_TYPE_1:
6832 default: 7062 default:
@@ -7419,11 +7649,15 @@ enable_msix_vectors:
7419 /* 7649 /*
7420 * Assign MSI-X vectors to interrupt handlers 7650 * Assign MSI-X vectors to interrupt handlers
7421 */ 7651 */
7422 7652 if (vectors > 1)
7423 /* The first vector must associated to slow-path handler for MQ */ 7653 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7424 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 7654 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7425 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 7655 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7426 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7656 else
7657 /* All Interrupts need to be handled by one EQ */
7658 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7659 &lpfc_sli4_intr_handler, IRQF_SHARED,
7660 LPFC_DRIVER_NAME, phba);
7427 if (rc) { 7661 if (rc) {
7428 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7662 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7429 "0485 MSI-X slow-path request_irq failed " 7663 "0485 MSI-X slow-path request_irq failed "
@@ -7765,6 +7999,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7765{ 7999{
7766 int wait_cnt = 0; 8000 int wait_cnt = 0;
7767 LPFC_MBOXQ_t *mboxq; 8001 LPFC_MBOXQ_t *mboxq;
8002 struct pci_dev *pdev = phba->pcidev;
7768 8003
7769 lpfc_stop_hba_timers(phba); 8004 lpfc_stop_hba_timers(phba);
7770 phba->sli4_hba.intr_enable = 0; 8005 phba->sli4_hba.intr_enable = 0;
@@ -7804,6 +8039,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7804 /* Disable PCI subsystem interrupt */ 8039 /* Disable PCI subsystem interrupt */
7805 lpfc_sli4_disable_intr(phba); 8040 lpfc_sli4_disable_intr(phba);
7806 8041
8042 /* Disable SR-IOV if enabled */
8043 if (phba->cfg_sriov_nr_virtfn)
8044 pci_disable_sriov(pdev);
8045
7807 /* Stop kthread signal shall trigger work_done one more time */ 8046 /* Stop kthread signal shall trigger work_done one more time */
7808 kthread_stop(phba->worker_thread); 8047 kthread_stop(phba->worker_thread);
7809 8048
@@ -7878,6 +8117,11 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7878 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 8117 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7879 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 8118 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7880 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 8119 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
8120
8121 /* Make sure that sge_supp_len can be handled by the driver */
8122 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8123 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8124
7881 return rc; 8125 return rc;
7882} 8126}
7883 8127
@@ -7902,6 +8146,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7902 int length; 8146 int length;
7903 struct lpfc_sli4_parameters *mbx_sli4_parameters; 8147 struct lpfc_sli4_parameters *mbx_sli4_parameters;
7904 8148
8149 /*
8150 * By default, the driver assumes the SLI4 port requires RPI
8151 * header postings. The SLI4_PARAM response will correct this
8152 * assumption.
8153 */
8154 phba->sli4_hba.rpi_hdrs_in_use = 1;
8155
7905 /* Read the port's SLI4 Config Parameters */ 8156 /* Read the port's SLI4 Config Parameters */
7906 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 8157 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
7907 sizeof(struct lpfc_sli4_cfg_mhdr)); 8158 sizeof(struct lpfc_sli4_cfg_mhdr));
@@ -7938,6 +8189,13 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7938 mbx_sli4_parameters); 8189 mbx_sli4_parameters);
7939 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 8190 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
7940 mbx_sli4_parameters); 8191 mbx_sli4_parameters);
8192 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
8193 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
8194
8195 /* Make sure that sge_supp_len can be handled by the driver */
8196 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
8197 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
8198
7941 return 0; 8199 return 0;
7942} 8200}
7943 8201
@@ -8173,6 +8431,10 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8173 8431
8174 lpfc_debugfs_terminate(vport); 8432 lpfc_debugfs_terminate(vport);
8175 8433
8434 /* Disable SR-IOV if enabled */
8435 if (phba->cfg_sriov_nr_virtfn)
8436 pci_disable_sriov(pdev);
8437
8176 /* Disable interrupt */ 8438 /* Disable interrupt */
8177 lpfc_sli_disable_intr(phba); 8439 lpfc_sli_disable_intr(phba);
8178 8440
@@ -8565,6 +8827,97 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8565} 8827}
8566 8828
8567/** 8829/**
8830 * lpfc_write_firmware - attempt to write a firmware image to the port
8831 * @phba: pointer to lpfc hba data structure.
8832 * @fw: pointer to firmware image returned from request_firmware.
8833 *
8834 * returns the number of bytes written if write is successful.
8835 * returns a negative error value if there were errors.
8836 * returns 0 if firmware matches currently active firmware on port.
8837 **/
8838int
8839lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8840{
8841 char fwrev[32];
8842 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
8843 struct list_head dma_buffer_list;
8844 int i, rc = 0;
8845 struct lpfc_dmabuf *dmabuf, *next;
8846 uint32_t offset = 0, temp_offset = 0;
8847
8848 INIT_LIST_HEAD(&dma_buffer_list);
8849 if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) ||
8850 (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) ||
8851 (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
8852 (image->size != fw->size)) {
8853 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8854 "3022 Invalid FW image found. "
8855 "Magic:%d Type:%x ID:%x\n",
8856 image->magic_number,
8857 bf_get(lpfc_grp_hdr_file_type, image),
8858 bf_get(lpfc_grp_hdr_id, image));
8859 return -EINVAL;
8860 }
8861 lpfc_decode_firmware_rev(phba, fwrev, 1);
8862 if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
8863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8864 "3023 Updating Firmware. Current Version:%s "
8865 "New Version:%s\n",
8866 fwrev, image->rev_name);
8867 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
8868 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
8869 GFP_KERNEL);
8870 if (!dmabuf) {
8871 rc = -ENOMEM;
8872 goto out;
8873 }
8874 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8875 SLI4_PAGE_SIZE,
8876 &dmabuf->phys,
8877 GFP_KERNEL);
8878 if (!dmabuf->virt) {
8879 kfree(dmabuf);
8880 rc = -ENOMEM;
8881 goto out;
8882 }
8883 list_add_tail(&dmabuf->list, &dma_buffer_list);
8884 }
8885 while (offset < fw->size) {
8886 temp_offset = offset;
8887 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
 8888 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
 8889 memcpy(dmabuf->virt,
 8890 fw->data + temp_offset,
 8891 fw->size - temp_offset);
 8892 temp_offset = fw->size;
 8893 break;
 8894 }
 8895 memcpy(dmabuf->virt, fw->data + temp_offset,
 8896 SLI4_PAGE_SIZE);
 8897 temp_offset += SLI4_PAGE_SIZE;
8898 }
8899 rc = lpfc_wr_object(phba, &dma_buffer_list,
8900 (fw->size - offset), &offset);
8901 if (rc) {
8902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8903 "3024 Firmware update failed. "
8904 "%d\n", rc);
8905 goto out;
8906 }
8907 }
8908 rc = offset;
8909 }
8910out:
8911 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
8912 list_del(&dmabuf->list);
8913 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
8914 dmabuf->virt, dmabuf->phys);
8915 kfree(dmabuf);
8916 }
8917 return rc;
8918}
8919
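
The batching bookkeeping in lpfc_write_firmware() can be exercised outside the
kernel. A self-contained sketch of the same page-walk, with NPAGES standing in
for LPFC_MBX_WR_CONFIG_MAX_BDE and the actual write step reduced to a printf:

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096
#define NPAGES 4	/* stand-in for LPFC_MBX_WR_CONFIG_MAX_BDE */

/* Walk an image in batches of up to NPAGES pages, copying whole pages
 * until the tail, as each pass of the download loop above does. */
static void copy_batches(const unsigned char *data, size_t size)
{
	unsigned char page[PAGE_SZ];
	size_t offset = 0, temp_offset;
	int i;

	while (offset < size) {
		temp_offset = offset;
		for (i = 0; i < NPAGES && temp_offset < size; i++) {
			size_t chunk = (size - temp_offset < PAGE_SZ) ?
					size - temp_offset : PAGE_SZ;
			memcpy(page, data + temp_offset, chunk);
			temp_offset += chunk;
		}
		printf("batch covered bytes [%zu, %zu)\n", offset, temp_offset);
		offset = temp_offset;	/* real code: lpfc_wr_object() advances it */
	}
}

int main(void)
{
	static unsigned char img[3 * PAGE_SZ + 100];

	copy_batches(img, sizeof(img));
	return 0;
}
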
8920/**
8568 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 8921 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8569 * @pdev: pointer to PCI device 8922 * @pdev: pointer to PCI device
8570 * @pid: pointer to PCI device identifier 8923 * @pid: pointer to PCI device identifier
@@ -8591,6 +8944,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8591 int error; 8944 int error;
8592 uint32_t cfg_mode, intr_mode; 8945 uint32_t cfg_mode, intr_mode;
8593 int mcnt; 8946 int mcnt;
8947 int adjusted_fcp_eq_count;
8948 int fcp_qidx;
8949 const struct firmware *fw;
 8950 char file_name[16];
8594 8951
8595 /* Allocate memory for HBA structure */ 8952 /* Allocate memory for HBA structure */
8596 phba = lpfc_hba_alloc(pdev); 8953 phba = lpfc_hba_alloc(pdev);
@@ -8688,11 +9045,25 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8688 error = -ENODEV; 9045 error = -ENODEV;
8689 goto out_free_sysfs_attr; 9046 goto out_free_sysfs_attr;
8690 } 9047 }
8691 /* Default to single FCP EQ for non-MSI-X */ 9048 /* Default to single EQ for non-MSI-X */
8692 if (phba->intr_type != MSIX) 9049 if (phba->intr_type != MSIX)
8693 phba->cfg_fcp_eq_count = 1; 9050 adjusted_fcp_eq_count = 0;
8694 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count) 9051 else if (phba->sli4_hba.msix_vec_nr <
8695 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 9052 phba->cfg_fcp_eq_count + 1)
9053 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9054 else
9055 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9056 /* Free unused EQs */
9057 for (fcp_qidx = adjusted_fcp_eq_count;
9058 fcp_qidx < phba->cfg_fcp_eq_count;
9059 fcp_qidx++) {
9060 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
9061 /* do not delete the first fcp_cq */
9062 if (fcp_qidx)
9063 lpfc_sli4_queue_free(
9064 phba->sli4_hba.fcp_cq[fcp_qidx]);
9065 }
9066 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
8696 /* Set up SLI-4 HBA */ 9067 /* Set up SLI-4 HBA */
8697 if (lpfc_sli4_hba_setup(phba)) { 9068 if (lpfc_sli4_hba_setup(phba)) {
8698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8731,6 +9102,14 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8731 /* Perform post initialization setup */ 9102 /* Perform post initialization setup */
8732 lpfc_post_init_setup(phba); 9103 lpfc_post_init_setup(phba);
8733 9104
9105 /* check for firmware upgrade or downgrade */
9106 snprintf(file_name, 16, "%s.grp", phba->ModelName);
9107 error = request_firmware(&fw, file_name, &phba->pcidev->dev);
9108 if (!error) {
9109 lpfc_write_firmware(phba, fw);
9110 release_firmware(fw);
9111 }
9112
8734 /* Check if there are static vports to be created. */ 9113 /* Check if there are static vports to be created. */
8735 lpfc_create_static_vport(phba); 9114 lpfc_create_static_vport(phba);
8736 9115
@@ -9498,6 +9877,10 @@ static struct pci_device_id lpfc_id_table[] = {
9498 PCI_ANY_ID, PCI_ANY_ID, }, 9877 PCI_ANY_ID, PCI_ANY_ID, },
9499 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, 9878 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
9500 PCI_ANY_ID, PCI_ANY_ID, }, 9879 PCI_ANY_ID, PCI_ANY_ID, },
9880 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
9881 PCI_ANY_ID, PCI_ANY_ID, },
9882 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
9883 PCI_ANY_ID, PCI_ANY_ID, },
9501 { 0 } 9884 { 0 }
9502}; 9885};
9503 9886
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e6ce9033f85e..556767028353 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -610,7 +610,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
613 mb->un.varRdSparm.vpi = vpi + phba->vpi_base; 613 if (phba->sli_rev >= LPFC_SLI_REV3)
614 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
614 615
615 /* save address for completion */ 616 /* save address for completion */
616 pmb->context1 = mp; 617 pmb->context1 = mp;
@@ -643,9 +644,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
643 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
644 645
645 mb->un.varUnregDID.did = did; 646 mb->un.varUnregDID.did = did;
646 if (vpi != 0xffff)
647 vpi += phba->vpi_base;
648 mb->un.varUnregDID.vpi = vpi; 647 mb->un.varUnregDID.vpi = vpi;
648 if ((vpi != 0xffff) &&
649 (phba->sli_rev == LPFC_SLI_REV4))
650 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
649 651
650 mb->mbxCommand = MBX_UNREG_D_ID; 652 mb->mbxCommand = MBX_UNREG_D_ID;
651 mb->mbxOwner = OWN_HOST; 653 mb->mbxOwner = OWN_HOST;
@@ -738,12 +740,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
738 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 740 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
739 741
740 mb->un.varRegLogin.rpi = 0; 742 mb->un.varRegLogin.rpi = 0;
741 if (phba->sli_rev == LPFC_SLI_REV4) { 743 if (phba->sli_rev == LPFC_SLI_REV4)
742 mb->un.varRegLogin.rpi = rpi; 744 mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
743 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR) 745 if (phba->sli_rev >= LPFC_SLI_REV3)
744 return 1; 746 mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
745 }
746 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
747 mb->un.varRegLogin.did = did; 747 mb->un.varRegLogin.did = did;
748 mb->mbxOwner = OWN_HOST; 748 mb->mbxOwner = OWN_HOST;
749 /* Get a buffer to hold NPorts Service Parameters */ 749 /* Get a buffer to hold NPorts Service Parameters */
@@ -757,7 +757,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, " 758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
759 "rpi x%x\n", vpi, did, rpi); 759 "rpi x%x\n", vpi, did, rpi);
760 return (1); 760 return 1;
761 } 761 }
762 INIT_LIST_HEAD(&mp->list); 762 INIT_LIST_HEAD(&mp->list);
763 sparam = mp->virt; 763 sparam = mp->virt;
@@ -773,7 +773,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
773 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys); 773 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
774 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys); 774 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
775 775
776 return (0); 776 return 0;
777} 777}
778 778
779/** 779/**
@@ -789,6 +789,9 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
789 * 789 *
790 * This routine prepares the mailbox command for unregistering remote port 790 * This routine prepares the mailbox command for unregistering remote port
791 * login. 791 * login.
792 *
793 * For SLI4 ports, the rpi passed to this function must be the physical
794 * rpi value, not the logical index.
792 **/ 795 **/
793void 796void
794lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, 797lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
@@ -799,9 +802,10 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
799 mb = &pmb->u.mb; 802 mb = &pmb->u.mb;
800 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 803 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
801 804
802 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 805 mb->un.varUnregLogin.rpi = rpi;
803 mb->un.varUnregLogin.rsvd1 = 0; 806 mb->un.varUnregLogin.rsvd1 = 0;
804 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base; 807 if (phba->sli_rev >= LPFC_SLI_REV3)
808 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
805 809
806 mb->mbxCommand = MBX_UNREG_LOGIN; 810 mb->mbxCommand = MBX_UNREG_LOGIN;
807 mb->mbxOwner = OWN_HOST; 811 mb->mbxOwner = OWN_HOST;
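
The vpi_ids[]/rpi_ids[] lookups in these hunks replace base+offset arithmetic
with a table keyed by the driver's logical index. A toy illustration of that
indirection; the array size and base value are hypothetical:

#include <stdio.h>
#include <stdint.h>

#define MAX_IDS 8

/* Logical index (driver side) -> physical identifier (port side). */
static uint16_t rpi_ids[MAX_IDS];

static void assign_ids(uint16_t base)
{
	int i;

	/* With extents the port hands out id ranges, so physical ids
	 * need not start at zero or match the logical index. */
	for (i = 0; i < MAX_IDS; i++)
		rpi_ids[i] = base + i;
}

int main(void)
{
	assign_ids(64);	/* hypothetical extent base from the port */
	printf("logical 3 -> physical %u\n", rpi_ids[3]);
	return 0;
}

The driver keeps the logical index in its data structures and translates to
the physical id only when building mailbox commands, as the hunks above show.
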
@@ -825,9 +829,16 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
825 829
826 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 830 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
827 if (mbox) { 831 if (mbox) {
828 lpfc_unreg_login(phba, vport->vpi, 832 /*
829 vport->vpi + phba->vpi_base, mbox); 833 * For SLI4 functions, the rpi field is overloaded for
830 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ; 834 * the vport context unreg all. This routine passes
835 * 0 for the rpi field in lpfc_unreg_login for compatibility
836 * with SLI3 and then overrides the rpi field with the
837 * expected value for SLI4.
838 */
839 lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
840 mbox);
841 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
831 mbox->vport = vport; 842 mbox->vport = vport;
832 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 843 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
833 mbox->context1 = NULL; 844 mbox->context1 = NULL;
@@ -865,9 +876,13 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
865 if ((phba->sli_rev == LPFC_SLI_REV4) && 876 if ((phba->sli_rev == LPFC_SLI_REV4) &&
866 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) 877 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
867 mb->un.varRegVpi.upd = 1; 878 mb->un.varRegVpi.upd = 1;
868 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; 879
880 mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
869 mb->un.varRegVpi.sid = vport->fc_myDID; 881 mb->un.varRegVpi.sid = vport->fc_myDID;
870 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; 882 if (phba->sli_rev == LPFC_SLI_REV4)
883 mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
884 else
885 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
871 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname, 886 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
872 sizeof(struct lpfc_name)); 887 sizeof(struct lpfc_name));
873 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]); 888 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
@@ -901,10 +916,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
901 MAILBOX_t *mb = &pmb->u.mb; 916 MAILBOX_t *mb = &pmb->u.mb;
902 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 917 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
903 918
904 if (phba->sli_rev < LPFC_SLI_REV4) 919 if (phba->sli_rev == LPFC_SLI_REV3)
905 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; 920 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
906 else 921 else if (phba->sli_rev >= LPFC_SLI_REV4)
907 mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base; 922 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
908 923
909 mb->mbxCommand = MBX_UNREG_VPI; 924 mb->mbxCommand = MBX_UNREG_VPI;
910 mb->mbxOwner = OWN_HOST; 925 mb->mbxOwner = OWN_HOST;
@@ -1735,12 +1750,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1735 return length; 1750 return length;
1736 } 1751 }
1737 1752
1738 /* Setup for the none-embedded mbox command */ 1753 /* Setup for the non-embedded mbox command */
1739 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE; 1754 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
1740 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? 1755 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1741 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; 1756 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1742 /* Allocate record for keeping SGE virtual addresses */ 1757 /* Allocate record for keeping SGE virtual addresses */
1743 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), 1758 mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1744 GFP_KERNEL); 1759 GFP_KERNEL);
1745 if (!mbox->sge_array) { 1760 if (!mbox->sge_array) {
1746 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1761 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
@@ -1790,12 +1805,87 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
 1790 /* The sub-header is in DMA memory, which needs endian conversion */ 1805 /* The sub-header is in DMA memory, which needs endian conversion */
1791 if (cfg_shdr) 1806 if (cfg_shdr)
1792 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, 1807 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1793 sizeof(union lpfc_sli4_cfg_shdr)); 1808 sizeof(union lpfc_sli4_cfg_shdr));
1794
1795 return alloc_len; 1809 return alloc_len;
1796} 1810}
1797 1811
1798/** 1812/**
1813 * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
1814 * @phba: pointer to lpfc hba data structure.
1815 * @mbox: pointer to an allocated lpfc mbox resource.
1816 * @exts_count: the number of extents, if required, to allocate.
1817 * @rsrc_type: the resource extent type.
 1818 * @emb: LPFC_SLI4_MBX_EMBED for an embedded command, LPFC_SLI4_MBX_NEMBED otherwise.
1819 *
1820 * This routine completes the subcommand header for SLI4 resource extent
1821 * mailbox commands. It is called after lpfc_sli4_config. The caller must
1822 * pass an allocated mailbox and the attributes required to initialize the
1823 * mailbox correctly.
1824 *
 1825 * Return: 0 on success, 1 on an unsupported opcode or missing SGE memory.
1826 **/
1827int
1828lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1829 uint16_t exts_count, uint16_t rsrc_type, bool emb)
1830{
1831 uint8_t opcode = 0;
1832 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
1833 void *virtaddr = NULL;
1834
1835 /* Set up SLI4 ioctl command header fields */
1836 if (emb == LPFC_SLI4_MBX_NEMBED) {
1837 /* Get the first SGE entry from the non-embedded DMA memory */
1838 virtaddr = mbox->sge_array->addr[0];
1839 if (virtaddr == NULL)
1840 return 1;
1841 n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
1842 }
1843
1844 /*
1845 * The resource type is common to all extent Opcodes and resides in the
1846 * same position.
1847 */
1848 if (emb == LPFC_SLI4_MBX_EMBED)
1849 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1850 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1851 rsrc_type);
1852 else {
1853 /* This is DMA data. Byteswap is required. */
1854 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1855 n_rsrc_extnt, rsrc_type);
1856 lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
1857 &n_rsrc_extnt->word4,
1858 sizeof(uint32_t));
1859 }
1860
1861 /* Complete the initialization for the particular Opcode. */
1862 opcode = lpfc_sli4_mbox_opcode_get(phba, mbox);
1863 switch (opcode) {
1864 case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
1865 if (emb == LPFC_SLI4_MBX_EMBED)
1866 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1867 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1868 exts_count);
1869 else
1870 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1871 n_rsrc_extnt, exts_count);
1872 break;
1873 case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
1874 case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
1875 case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
1876 /* Initialization is complete.*/
1877 break;
1878 default:
1879 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1880 "2929 Resource Extent Opcode x%x is "
1881 "unsupported\n", opcode);
1882 return 1;
1883 }
1884
1885 return 0;
1886}
1887
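
The routine above writes one field common to every extent opcode and a count
only for allocation. A compact model of that shape; the opcodes and struct
here are stand-ins, not the SLI4 wire format:

#include <stdio.h>
#include <stdint.h>

enum ext_op { OP_ALLOC, OP_GET_INFO, OP_GET_ALLOCATED, OP_DEALLOC };

struct ext_req {
	uint16_t rsrc_type;	/* common to every extent opcode */
	uint16_t exts_count;	/* meaningful only for allocation */
};

static int init_extent_req(struct ext_req *req, enum ext_op op,
			   uint16_t type, uint16_t count)
{
	req->rsrc_type = type;
	switch (op) {
	case OP_ALLOC:
		req->exts_count = count;
		break;
	case OP_GET_INFO:
	case OP_GET_ALLOCATED:
	case OP_DEALLOC:
		break;		/* header fields alone suffice */
	default:
		return 1;	/* unsupported opcode */
	}
	return 0;
}

int main(void)
{
	struct ext_req req = { 0, 0 };

	printf("rc=%d type=%u count=%u\n",
	       init_extent_req(&req, OP_ALLOC, 3, 16),
	       req.rsrc_type, req.exts_count);
	return 0;
}
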
1888/**
1799 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command 1889 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1800 * @phba: pointer to lpfc hba data structure. 1890 * @phba: pointer to lpfc hba data structure.
1801 * @mbox: pointer to lpfc mbox command. 1891 * @mbox: pointer to lpfc mbox command.
@@ -1939,9 +2029,12 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1939 bf_set(lpfc_init_vfi_vr, init_vfi, 1); 2029 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1940 bf_set(lpfc_init_vfi_vt, init_vfi, 1); 2030 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1941 bf_set(lpfc_init_vfi_vp, init_vfi, 1); 2031 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
1942 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base); 2032 bf_set(lpfc_init_vfi_vfi, init_vfi,
1943 bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base); 2033 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
1944 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); 2034 bf_set(lpfc_init_vpi_vpi, init_vfi,
2035 vport->phba->vpi_ids[vport->vpi]);
2036 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2037 vport->phba->fcf.fcfi);
1945} 2038}
1946 2039
1947/** 2040/**
@@ -1964,9 +2057,10 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1964 reg_vfi = &mbox->u.mqe.un.reg_vfi; 2057 reg_vfi = &mbox->u.mqe.un.reg_vfi;
1965 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); 2058 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
1966 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); 2059 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1967 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); 2060 bf_set(lpfc_reg_vfi_vfi, reg_vfi,
2061 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
1968 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); 2062 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1969 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); 2063 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
1970 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); 2064 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
1971 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); 2065 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
1972 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); 2066 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
@@ -1997,9 +2091,9 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1997 memset(mbox, 0, sizeof(*mbox)); 2091 memset(mbox, 0, sizeof(*mbox));
1998 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); 2092 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1999 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, 2093 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2000 vpi + phba->vpi_base); 2094 phba->vpi_ids[vpi]);
2001 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi, 2095 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2002 phba->pport->vfi + phba->vfi_base); 2096 phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2003} 2097}
2004 2098
2005/** 2099/**
@@ -2019,7 +2113,7 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2019 memset(mbox, 0, sizeof(*mbox)); 2113 memset(mbox, 0, sizeof(*mbox));
2020 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); 2114 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2021 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, 2115 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2022 vport->vfi + vport->phba->vfi_base); 2116 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2023} 2117}
2024 2118
2025/** 2119/**
@@ -2131,12 +2225,14 @@ lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2131void 2225void
2132lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) 2226lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2133{ 2227{
2228 struct lpfc_hba *phba = ndlp->phba;
2134 struct lpfc_mbx_resume_rpi *resume_rpi; 2229 struct lpfc_mbx_resume_rpi *resume_rpi;
2135 2230
2136 memset(mbox, 0, sizeof(*mbox)); 2231 memset(mbox, 0, sizeof(*mbox));
2137 resume_rpi = &mbox->u.mqe.un.resume_rpi; 2232 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2138 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); 2233 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2139 bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi); 2234 bf_set(lpfc_resume_rpi_index, resume_rpi,
2235 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2140 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); 2236 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2141 resume_rpi->event_tag = ndlp->phba->fc_eventTag; 2237 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2142} 2238}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cbb48ee8b0bb..10d5b5e41499 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -62,7 +62,6 @@ int
62lpfc_mem_alloc(struct lpfc_hba *phba, int align) 62lpfc_mem_alloc(struct lpfc_hba *phba, int align)
63{ 63{
64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
65 int longs;
66 int i; 65 int i;
67 66
68 if (phba->sli_rev == LPFC_SLI_REV4) 67 if (phba->sli_rev == LPFC_SLI_REV4)
@@ -138,17 +137,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
138 phba->lpfc_hrb_pool = NULL; 137 phba->lpfc_hrb_pool = NULL;
139 phba->lpfc_drb_pool = NULL; 138 phba->lpfc_drb_pool = NULL;
140 } 139 }
141 /* vpi zero is reserved for the physical port so add 1 to max */
142 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
143 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
144 if (!phba->vpi_bmask)
145 goto fail_free_dbq_pool;
146 140
147 return 0; 141 return 0;
148
149 fail_free_dbq_pool:
150 pci_pool_destroy(phba->lpfc_drb_pool);
151 phba->lpfc_drb_pool = NULL;
152 fail_free_hrb_pool: 142 fail_free_hrb_pool:
153 pci_pool_destroy(phba->lpfc_hrb_pool); 143 pci_pool_destroy(phba->lpfc_hrb_pool);
154 phba->lpfc_hrb_pool = NULL; 144 phba->lpfc_hrb_pool = NULL;
@@ -191,9 +181,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
191 int i; 181 int i;
192 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 182 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
193 183
194 /* Free VPI bitmask memory */
195 kfree(phba->vpi_bmask);
196
197 /* Free HBQ pools */ 184 /* Free HBQ pools */
198 lpfc_sli_hbqbuf_free_all(phba); 185 lpfc_sli_hbqbuf_free_all(phba);
199 if (phba->lpfc_drb_pool) 186 if (phba->lpfc_drb_pool)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 0d92d4205ea6..2ddd02f7c603 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,11 +350,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
350 ndlp->nlp_maxframe = 350 ndlp->nlp_maxframe =
351 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; 351 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
352 352
353 /* 353 /* no need to reg_login if we are already in one of these states */
354 * Need to unreg_login if we are already in one of these states and
355 * change to NPR state. This will block the port until after the ACC
356 * completes and the reg_login is issued and completed.
357 */
358 switch (ndlp->nlp_state) { 354 switch (ndlp->nlp_state) {
359 case NLP_STE_NPR_NODE: 355 case NLP_STE_NPR_NODE:
360 if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) 356 if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -363,9 +359,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
363 case NLP_STE_PRLI_ISSUE: 359 case NLP_STE_PRLI_ISSUE:
364 case NLP_STE_UNMAPPED_NODE: 360 case NLP_STE_UNMAPPED_NODE:
365 case NLP_STE_MAPPED_NODE: 361 case NLP_STE_MAPPED_NODE:
366 lpfc_unreg_rpi(vport, ndlp); 362 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
367 ndlp->nlp_prev_state = ndlp->nlp_state; 363 return 1;
368 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
369 } 364 }
370 365
371 if ((vport->fc_flag & FC_PT2PT) && 366 if ((vport->fc_flag & FC_PT2PT) &&
@@ -657,6 +652,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
657 lpfc_unreg_rpi(vport, ndlp); 652 lpfc_unreg_rpi(vport, ndlp);
658 return 0; 653 return 0;
659} 654}
655
660/** 656/**
661 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd. 657 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
662 * @phba : Pointer to lpfc_hba structure. 658 * @phba : Pointer to lpfc_hba structure.
@@ -1399,8 +1395,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1399 if (mb->mbxStatus) { 1395 if (mb->mbxStatus) {
1400 /* RegLogin failed */ 1396 /* RegLogin failed */
1401 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1397 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1402 "0246 RegLogin failed Data: x%x x%x x%x\n", 1398 "0246 RegLogin failed Data: x%x x%x x%x x%x "
1403 did, mb->mbxStatus, vport->port_state); 1399 "x%x\n",
1400 did, mb->mbxStatus, vport->port_state,
1401 mb->un.varRegLogin.vpi,
1402 mb->un.varRegLogin.rpi);
1404 /* 1403 /*
1405 * If RegLogin failed due to lack of HBA resources do not 1404 * If RegLogin failed due to lack of HBA resources do not
1406 * retry discovery. 1405 * retry discovery.
@@ -1424,7 +1423,10 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1424 return ndlp->nlp_state; 1423 return ndlp->nlp_state;
1425 } 1424 }
1426 1425
1427 ndlp->nlp_rpi = mb->un.varWords[0]; 1426 /* SLI4 ports have preallocated logical rpis. */
1427 if (vport->phba->sli_rev < LPFC_SLI_REV4)
1428 ndlp->nlp_rpi = mb->un.varWords[0];
1429
1428 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1430 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1429 1431
1430 /* Only if we are not a fabric nport do we issue PRLI */ 1432 /* Only if we are not a fabric nport do we issue PRLI */
@@ -2025,7 +2027,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2025 MAILBOX_t *mb = &pmb->u.mb; 2027 MAILBOX_t *mb = &pmb->u.mb;
2026 2028
2027 if (!mb->mbxStatus) { 2029 if (!mb->mbxStatus) {
2028 ndlp->nlp_rpi = mb->un.varWords[0]; 2030 /* SLI4 ports have preallocated logical rpis. */
2031 if (vport->phba->sli_rev < LPFC_SLI_REV4)
2032 ndlp->nlp_rpi = mb->un.varWords[0];
2029 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 2033 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2030 } else { 2034 } else {
2031 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 2035 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 84e4481b2406..3ccc97496ebf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -743,7 +743,14 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
743 if (bcnt == 0) 743 if (bcnt == 0)
744 continue; 744 continue;
745 /* Now, post the SCSI buffer list sgls as a block */ 745 /* Now, post the SCSI buffer list sgls as a block */
746 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); 746 if (!phba->sli4_hba.extents_in_use)
747 status = lpfc_sli4_post_scsi_sgl_block(phba,
748 &sblist,
749 bcnt);
750 else
751 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
752 &sblist,
753 bcnt);
747 /* Reset SCSI buffer count for next round of posting */ 754 /* Reset SCSI buffer count for next round of posting */
748 bcnt = 0; 755 bcnt = 0;
749 while (!list_empty(&sblist)) { 756 while (!list_empty(&sblist)) {
@@ -787,7 +794,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
787 dma_addr_t pdma_phys_fcp_cmd; 794 dma_addr_t pdma_phys_fcp_cmd;
788 dma_addr_t pdma_phys_fcp_rsp; 795 dma_addr_t pdma_phys_fcp_rsp;
789 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; 796 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
790 uint16_t iotag, last_xritag = NO_XRI; 797 uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
791 int status = 0, index; 798 int status = 0, index;
792 int bcnt; 799 int bcnt;
793 int non_sequential_xri = 0; 800 int non_sequential_xri = 0;
@@ -823,13 +830,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
823 break; 830 break;
824 } 831 }
825 832
826 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba); 833 lxri = lpfc_sli4_next_xritag(phba);
827 if (psb->cur_iocbq.sli4_xritag == NO_XRI) { 834 if (lxri == NO_XRI) {
828 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 835 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
829 psb->data, psb->dma_handle); 836 psb->data, psb->dma_handle);
830 kfree(psb); 837 kfree(psb);
831 break; 838 break;
832 } 839 }
840 psb->cur_iocbq.sli4_lxritag = lxri;
841 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
833 if (last_xritag != NO_XRI 842 if (last_xritag != NO_XRI
834 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { 843 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
835 non_sequential_xri = 1; 844 non_sequential_xri = 1;
@@ -861,6 +870,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
861 */ 870 */
862 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 871 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
863 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 872 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
873 sgl->word2 = le32_to_cpu(sgl->word2);
864 bf_set(lpfc_sli4_sge_last, sgl, 0); 874 bf_set(lpfc_sli4_sge_last, sgl, 0);
865 sgl->word2 = cpu_to_le32(sgl->word2); 875 sgl->word2 = cpu_to_le32(sgl->word2);
866 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); 876 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
@@ -869,6 +879,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
869 /* Setup the physical region for the FCP RSP */ 879 /* Setup the physical region for the FCP RSP */
870 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); 880 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
871 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); 881 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
882 sgl->word2 = le32_to_cpu(sgl->word2);
872 bf_set(lpfc_sli4_sge_last, sgl, 1); 883 bf_set(lpfc_sli4_sge_last, sgl, 1);
873 sgl->word2 = cpu_to_le32(sgl->word2); 884 sgl->word2 = cpu_to_le32(sgl->word2);
874 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); 885 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
@@ -914,7 +925,21 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
914 } 925 }
915 } 926 }
916 if (bcnt) { 927 if (bcnt) {
917 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); 928 if (!phba->sli4_hba.extents_in_use)
929 status = lpfc_sli4_post_scsi_sgl_block(phba,
930 &sblist,
931 bcnt);
932 else
933 status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
934 &sblist,
935 bcnt);
936
937 if (status) {
938 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
939 "3021 SCSI SGL post error %d\n",
940 status);
941 bcnt = 0;
942 }
918 /* Reset SCSI buffer count for next round of posting */ 943 /* Reset SCSI buffer count for next round of posting */
919 while (!list_empty(&sblist)) { 944 while (!list_empty(&sblist)) {
920 list_remove_head(&sblist, psb, struct lpfc_scsi_buf, 945 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
@@ -2081,6 +2106,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2081 dma_len = sg_dma_len(sgel); 2106 dma_len = sg_dma_len(sgel);
2082 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2107 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2083 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2108 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2109 sgl->word2 = le32_to_cpu(sgl->word2);
2084 if ((num_bde + 1) == nseg) 2110 if ((num_bde + 1) == nseg)
2085 bf_set(lpfc_sli4_sge_last, sgl, 1); 2111 bf_set(lpfc_sli4_sge_last, sgl, 1);
2086 else 2112 else
@@ -2794,6 +2820,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2794 * of the scsi_cmnd request_buffer 2820 * of the scsi_cmnd request_buffer
2795 */ 2821 */
2796 piocbq->iocb.ulpContext = pnode->nlp_rpi; 2822 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2823 if (phba->sli_rev == LPFC_SLI_REV4)
2824 piocbq->iocb.ulpContext =
2825 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
2797 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 2826 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2798 piocbq->iocb.ulpFCP2Rcvy = 1; 2827 piocbq->iocb.ulpFCP2Rcvy = 1;
2799 else 2828 else
@@ -2807,7 +2836,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2807} 2836}
2808 2837
2809/** 2838/**
2810 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit 2839 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
2811 * @vport: The virtual port for which this call is being executed. 2840 * @vport: The virtual port for which this call is being executed.
2812 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2841 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2813 * @lun: Logical unit number. 2842 * @lun: Logical unit number.
@@ -2851,6 +2880,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2851 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 2880 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2852 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 2881 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2853 piocb->ulpContext = ndlp->nlp_rpi; 2882 piocb->ulpContext = ndlp->nlp_rpi;
2883 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
2884 piocb->ulpContext =
2885 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
2886 }
2854 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 2887 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2855 piocb->ulpFCP2Rcvy = 1; 2888 piocb->ulpFCP2Rcvy = 1;
2856 } 2889 }
@@ -3405,9 +3438,10 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3405 3438
3406 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3439 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3407 "0702 Issue %s to TGT %d LUN %d " 3440 "0702 Issue %s to TGT %d LUN %d "
3408 "rpi x%x nlp_flag x%x\n", 3441 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
3409 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 3442 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3410 pnode->nlp_rpi, pnode->nlp_flag); 3443 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
3444 iocbq->iocb_flag);
3411 3445
3412 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 3446 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3413 iocbq, iocbqrsp, lpfc_cmd->timeout); 3447 iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -3419,10 +3453,12 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3419 ret = FAILED; 3453 ret = FAILED;
3420 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 3454 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3421 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3455 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3422 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n", 3456 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
3457 "iocb_flag x%x\n",
3423 lpfc_taskmgmt_name(task_mgmt_cmd), 3458 lpfc_taskmgmt_name(task_mgmt_cmd),
3424 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, 3459 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3425 iocbqrsp->iocb.un.ulpWord[4]); 3460 iocbqrsp->iocb.un.ulpWord[4],
3461 iocbq->iocb_flag);
3426 } else if (status == IOCB_BUSY) 3462 } else if (status == IOCB_BUSY)
3427 ret = FAILED; 3463 ret = FAILED;
3428 else 3464 else
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fd5835e1c039..98999bbd8cbf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -65,6 +65,9 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
65 struct lpfc_iocbq *); 65 struct lpfc_iocbq *);
66static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 66static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67 struct hbq_dmabuf *); 67 struct hbq_dmabuf *);
68static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
69 struct lpfc_cqe *);
70
68static IOCB_t * 71static IOCB_t *
69lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 72lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
70{ 73{
@@ -456,7 +459,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
456 struct lpfc_iocbq * iocbq = NULL; 459 struct lpfc_iocbq * iocbq = NULL;
457 460
458 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 461 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
459
460 if (iocbq) 462 if (iocbq)
461 phba->iocb_cnt++; 463 phba->iocb_cnt++;
462 if (phba->iocb_cnt > phba->iocb_max) 464 if (phba->iocb_cnt > phba->iocb_max)
@@ -479,13 +481,10 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
479static struct lpfc_sglq * 481static struct lpfc_sglq *
480__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 482__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
481{ 483{
482 uint16_t adj_xri;
483 struct lpfc_sglq *sglq; 484 struct lpfc_sglq *sglq;
484 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; 485
485 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) 486 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
486 return NULL; 487 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
487 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
488 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
489 return sglq; 488 return sglq;
490} 489}
491 490
@@ -504,12 +503,9 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
504struct lpfc_sglq * 503struct lpfc_sglq *
505__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 504__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
506{ 505{
507 uint16_t adj_xri;
508 struct lpfc_sglq *sglq; 506 struct lpfc_sglq *sglq;
509 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; 507
510 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) 508 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
511 return NULL;
512 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
513 return sglq; 509 return sglq;
514} 510}
515 511
@@ -532,7 +528,6 @@ static int
532__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 528__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
533 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 529 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
534{ 530{
535 uint16_t adj_xri;
536 struct lpfc_node_rrq *rrq; 531 struct lpfc_node_rrq *rrq;
537 int empty; 532 int empty;
538 uint32_t did = 0; 533 uint32_t did = 0;
@@ -553,21 +548,19 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
553 /* 548 /*
554 * set the active bit even if there is no mem available. 549 * set the active bit even if there is no mem available.
555 */ 550 */
556 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
557
558 if (NLP_CHK_FREE_REQ(ndlp)) 551 if (NLP_CHK_FREE_REQ(ndlp))
559 goto out; 552 goto out;
560 553
561 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 554 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
562 goto out; 555 goto out;
563 556
564 if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) 557 if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
565 goto out; 558 goto out;
566 559
567 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
568 if (rrq) { 561 if (rrq) {
569 rrq->send_rrq = send_rrq; 562 rrq->send_rrq = send_rrq;
570 rrq->xritag = xritag; 563 rrq->xritag = phba->sli4_hba.xri_ids[xritag];
571 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
572 rrq->ndlp = ndlp; 565 rrq->ndlp = ndlp;
573 rrq->nlp_DID = ndlp->nlp_DID; 566 rrq->nlp_DID = ndlp->nlp_DID;
@@ -603,7 +596,6 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
603 uint16_t xritag, 596 uint16_t xritag,
604 struct lpfc_node_rrq *rrq) 597 struct lpfc_node_rrq *rrq)
605{ 598{
606 uint16_t adj_xri;
607 struct lpfc_nodelist *ndlp = NULL; 599 struct lpfc_nodelist *ndlp = NULL;
608 600
609 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) 601 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
@@ -619,8 +611,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
619 if (!ndlp) 611 if (!ndlp)
620 goto out; 612 goto out;
621 613
622 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; 614 if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
623 if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
624 rrq->send_rrq = 0; 615 rrq->send_rrq = 0;
625 rrq->xritag = 0; 616 rrq->xritag = 0;
626 rrq->rrq_stop_time = 0; 617 rrq->rrq_stop_time = 0;
@@ -796,12 +787,9 @@ int
796lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 787lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
797 uint16_t xritag) 788 uint16_t xritag)
798{ 789{
799 uint16_t adj_xri;
800
801 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
802 if (!ndlp) 790 if (!ndlp)
803 return 0; 791 return 0;
804 if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) 792 if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
805 return 1; 793 return 1;
806 else 794 else
807 return 0; 795 return 0;
@@ -841,7 +829,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
841 * @piocb: Pointer to the iocbq. 829 * @piocb: Pointer to the iocbq.
842 * 830 *
843 * This function is called with hbalock held. This function 831 * This function is called with hbalock held. This function
844 * Gets a new driver sglq object from the sglq list. If the 832 * gets a new driver sglq object from the sglq list. If the
845 * list is not empty then it is successful, it returns pointer to the newly 833 * list is not empty then it is successful, it returns pointer to the newly
846 * allocated sglq object else it returns NULL. 834 * allocated sglq object else it returns NULL.
847 **/ 835 **/
@@ -851,7 +839,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
851 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; 839 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
852 struct lpfc_sglq *sglq = NULL; 840 struct lpfc_sglq *sglq = NULL;
853 struct lpfc_sglq *start_sglq = NULL; 841 struct lpfc_sglq *start_sglq = NULL;
854 uint16_t adj_xri;
855 struct lpfc_scsi_buf *lpfc_cmd; 842 struct lpfc_scsi_buf *lpfc_cmd;
856 struct lpfc_nodelist *ndlp; 843 struct lpfc_nodelist *ndlp;
857 int found = 0; 844 int found = 0;
@@ -870,8 +857,6 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
870 while (!found) { 857 while (!found) {
871 if (!sglq) 858 if (!sglq)
872 return NULL; 859 return NULL;
873 adj_xri = sglq->sli4_xritag -
874 phba->sli4_hba.max_cfg_param.xri_base;
875 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { 860 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
876 /* This xri has an rrq outstanding for this DID. 861 /* This xri has an rrq outstanding for this DID.
877 * put it back in the list and get another xri. 862 * put it back in the list and get another xri.
@@ -888,7 +873,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
888 } 873 }
889 sglq->ndlp = ndlp; 874 sglq->ndlp = ndlp;
890 found = 1; 875 found = 1;
891 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 876 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
892 sglq->state = SGL_ALLOCATED; 877 sglq->state = SGL_ALLOCATED;
893 } 878 }
894 return sglq; 879 return sglq;
@@ -944,7 +929,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
944 if (iocbq->sli4_xritag == NO_XRI) 929 if (iocbq->sli4_xritag == NO_XRI)
945 sglq = NULL; 930 sglq = NULL;
946 else 931 else
947 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 932 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
933
948 if (sglq) { 934 if (sglq) {
949 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 935 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
950 (sglq->state != SGL_XRI_ABORTED)) { 936 (sglq->state != SGL_XRI_ABORTED)) {
@@ -971,6 +957,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
971 * Clean all volatile data fields, preserve iotag and node struct. 957 * Clean all volatile data fields, preserve iotag and node struct.
972 */ 958 */
973 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 959 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
960 iocbq->sli4_lxritag = NO_XRI;
974 iocbq->sli4_xritag = NO_XRI; 961 iocbq->sli4_xritag = NO_XRI;
975 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 962 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
976} 963}
@@ -2113,7 +2100,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2113 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2100 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2114 !pmb->u.mb.mbxStatus) { 2101 !pmb->u.mb.mbxStatus) {
2115 rpi = pmb->u.mb.un.varWords[0]; 2102 rpi = pmb->u.mb.un.varWords[0];
2116 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base; 2103 vpi = pmb->u.mb.un.varRegLogin.vpi;
2117 lpfc_unreg_login(phba, vpi, rpi, pmb); 2104 lpfc_unreg_login(phba, vpi, rpi, pmb);
2118 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2105 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2119 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2106 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -3881,8 +3868,10 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3881 list_del_init(&phba->sli4_hba.els_cq->list); 3868 list_del_init(&phba->sli4_hba.els_cq->list);
3882 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) 3869 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3883 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); 3870 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3884 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) 3871 qindx = 0;
3872 do
3885 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); 3873 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3874 while (++qindx < phba->cfg_fcp_eq_count);
3886 spin_unlock_irq(&phba->hbalock); 3875 spin_unlock_irq(&phba->hbalock);
3887 3876
3888 /* Now physically reset the device */ 3877 /* Now physically reset the device */
@@ -4318,6 +4307,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4318 continue; 4307 continue;
4319 } else if (rc) 4308 } else if (rc)
4320 break; 4309 break;
4310
4321 phba->link_state = LPFC_INIT_MBX_CMDS; 4311 phba->link_state = LPFC_INIT_MBX_CMDS;
4322 lpfc_config_port(phba, pmb); 4312 lpfc_config_port(phba, pmb);
4323 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4313 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -4421,7 +4411,8 @@ int
4421lpfc_sli_hba_setup(struct lpfc_hba *phba) 4411lpfc_sli_hba_setup(struct lpfc_hba *phba)
4422{ 4412{
4423 uint32_t rc; 4413 uint32_t rc;
4424 int mode = 3; 4414 int mode = 3, i;
4415 int longs;
4425 4416
4426 switch (lpfc_sli_mode) { 4417 switch (lpfc_sli_mode) {
4427 case 2: 4418 case 2:
@@ -4491,6 +4482,35 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4491 if (rc) 4482 if (rc)
4492 goto lpfc_sli_hba_setup_error; 4483 goto lpfc_sli_hba_setup_error;
4493 4484
4485 /* Initialize VPIs. */
4486 if (phba->sli_rev == LPFC_SLI_REV3) {
4487 /*
4488 * The VPI bitmask and physical ID array are allocated
4489 * and initialized once only - at driver load. A port
4490 * reset doesn't need to reinitialize this memory.
4491 */
4492 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4493 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4494 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4495 GFP_KERNEL);
4496 if (!phba->vpi_bmask) {
4497 rc = -ENOMEM;
4498 goto lpfc_sli_hba_setup_error;
4499 }
4500
4501 phba->vpi_ids = kzalloc(
4502 (phba->max_vpi+1) * sizeof(uint16_t),
4503 GFP_KERNEL);
4504 if (!phba->vpi_ids) {
4505 kfree(phba->vpi_bmask);
4506 rc = -ENOMEM;
4507 goto lpfc_sli_hba_setup_error;
4508 }
4509 for (i = 0; i < phba->max_vpi; i++)
4510 phba->vpi_ids[i] = i;
4511 }
4512 }
4513
4494 /* Init HBQs */ 4514 /* Init HBQs */
4495 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4515 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4496 rc = lpfc_sli_hbq_setup(phba); 4516 rc = lpfc_sli_hbq_setup(phba);
@@ -4677,9 +4697,11 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4677 4697
4678 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4698 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4679 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4699 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4680 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4700 fcp_eqidx = 0;
4701 do
4681 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4702 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4682 LPFC_QUEUE_REARM); 4703 LPFC_QUEUE_REARM);
4704 while (++fcp_eqidx < phba->cfg_fcp_eq_count);
4683 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4705 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4684 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4706 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4685 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4707 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -4687,6 +4709,803 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4687} 4709}
4688 4710
4689/** 4711/**
 4712 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count and size.
4713 * @phba: Pointer to HBA context object.
4714 * @type: The resource extent type.
4715 *
 4716 * This function reads the port's available extent count and extent size for the given resource type.
4717 **/
4718static int
4719lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4720 uint16_t *extnt_count, uint16_t *extnt_size)
4721{
4722 int rc = 0;
4723 uint32_t length;
4724 uint32_t mbox_tmo;
4725 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4726 LPFC_MBOXQ_t *mbox;
4727
4728 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4729 if (!mbox)
4730 return -ENOMEM;
4731
4732 /* Find out how many extents are available for this resource type */
4733 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
4734 sizeof(struct lpfc_sli4_cfg_mhdr));
4735 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4736 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
4737 length, LPFC_SLI4_MBX_EMBED);
4738
4739 /* Send an extents count of 0 - the GET doesn't use it. */
4740 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
4741 LPFC_SLI4_MBX_EMBED);
4742 if (unlikely(rc)) {
4743 rc = -EIO;
4744 goto err_exit;
4745 }
4746
4747 if (!phba->sli4_hba.intr_enable)
4748 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4749 else {
4750 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
4751 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4752 }
4753 if (unlikely(rc)) {
4754 rc = -EIO;
4755 goto err_exit;
4756 }
4757
4758 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
4759 if (bf_get(lpfc_mbox_hdr_status,
4760 &rsrc_info->header.cfg_shdr.response)) {
4761 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4762 "2930 Failed to get resource extents "
4763 "Status 0x%x Add'l Status 0x%x\n",
4764 bf_get(lpfc_mbox_hdr_status,
4765 &rsrc_info->header.cfg_shdr.response),
4766 bf_get(lpfc_mbox_hdr_add_status,
4767 &rsrc_info->header.cfg_shdr.response));
4768 rc = -EIO;
4769 goto err_exit;
4770 }
4771
4772 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
4773 &rsrc_info->u.rsp);
4774 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
4775 &rsrc_info->u.rsp);
4776 err_exit:
4777 mempool_free(mbox, phba->mbox_mem_pool);
4778 return rc;
4779}
4780
4781/**
4782 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
4783 * @phba: Pointer to HBA context object.
4784 * @type: The extent type to check.
4785 *
4786 * This function reads the current available extents from the port and checks
4787 * if the extent count or extent size has changed since the last access.
4788 * Callers use this routine post port reset to understand if there is a
4789 * extent reprovisioning requirement.
4790 *
4791 * Returns:
4792 * -Error: error indicates problem.
4793 * 1: Extent count or size has changed.
4794 * 0: No changes.
4795 **/
4796static int
4797lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
4798{
4799 uint16_t curr_ext_cnt, rsrc_ext_cnt;
4800 uint16_t size_diff, rsrc_ext_size;
4801 int rc = 0;
4802 struct lpfc_rsrc_blks *rsrc_entry;
4803 struct list_head *rsrc_blk_list = NULL;
4804
4805 size_diff = 0;
4806 curr_ext_cnt = 0;
4807 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
4808 &rsrc_ext_cnt,
4809 &rsrc_ext_size);
4810 if (unlikely(rc))
4811 return -EIO;
4812
4813 switch (type) {
4814 case LPFC_RSC_TYPE_FCOE_RPI:
4815 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
4816 break;
4817 case LPFC_RSC_TYPE_FCOE_VPI:
4818 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
4819 break;
4820 case LPFC_RSC_TYPE_FCOE_XRI:
4821 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
4822 break;
4823 case LPFC_RSC_TYPE_FCOE_VFI:
4824 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
4825 break;
4826 default:
4827 break;
4828 }
4829
4830 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
4831 curr_ext_cnt++;
4832 if (rsrc_entry->rsrc_size != rsrc_ext_size)
4833 size_diff++;
4834 }
4835
4836 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
4837 rc = 1;
4838
4839 return rc;
4840}
4841
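
The routine above reduces to two comparisons: the count of recorded extent blocks against the count the port now reports, and each recorded block's size against the freshly read extent size. A standalone sketch of that decision, assuming a flat array in place of the driver's list_head chains:

#include <stdio.h>

struct rsrc_blk {
	unsigned short rsrc_size;
};

/* Return 1 if the recorded blocks no longer match what the port
 * reports, 0 if nothing changed -- the same contract as the driver
 * routine above. */
static int extents_changed(const struct rsrc_blk *blks, int curr_ext_cnt,
			   int rsrc_ext_cnt, unsigned short rsrc_ext_size)
{
	int i, size_diff = 0;

	for (i = 0; i < curr_ext_cnt; i++)
		if (blks[i].rsrc_size != rsrc_ext_size)
			size_diff++;

	return (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) ? 1 : 0;
}

int main(void)
{
	struct rsrc_blk blks[] = { {64}, {64}, {64} };

	/* Same count and size: no reprovisioning required. */
	printf("%d\n", extents_changed(blks, 3, 3, 64));
	/* Port now reports larger extents: reprovision everything. */
	printf("%d\n", extents_changed(blks, 3, 3, 128));
	return 0;
}
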
4842/**
 4843 * lpfc_sli4_cfg_post_extnts - Post an extent allocation request to the port.
 4844 * @phba: Pointer to HBA context object.
 4845 * @extnt_cnt: number of available extents.
 4846 * @type: the extent type (rpi, xri, vfi, vpi).
 4847 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 4848 * @mbox: pointer to the caller's allocated mailbox structure.
4849 *
 4850 * This function executes the extent allocation request. It also
 4851 * computes the mailbox memory the request needs, choosing an embedded or
 4852 * non-embedded command accordingly. It is the caller's responsibility to
 4853 * evaluate the response.
4854 *
4855 * Returns:
4856 * -Error: Error value describes the condition found.
4857 * 0: if successful
4858 **/
4859static int
4860lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
4861 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
4862{
4863 int rc = 0;
4864 uint32_t req_len;
4865 uint32_t emb_len;
4866 uint32_t alloc_len, mbox_tmo;
4867
4868 /* Calculate the total requested length of the dma memory */
4869 req_len = *extnt_cnt * sizeof(uint16_t);
4870
4871 /*
4872 * Calculate the size of an embedded mailbox. The uint32_t
 4873 * accounts for the extents-specific word.
4874 */
4875 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
4876 sizeof(uint32_t);
4877
4878 /*
4879 * Presume the allocation and response will fit into an embedded
4880 * mailbox. If not true, reconfigure to a non-embedded mailbox.
4881 */
4882 *emb = LPFC_SLI4_MBX_EMBED;
4883 if (req_len > emb_len) {
4884 req_len = *extnt_cnt * sizeof(uint16_t) +
4885 sizeof(union lpfc_sli4_cfg_shdr) +
4886 sizeof(uint32_t);
4887 *emb = LPFC_SLI4_MBX_NEMBED;
4888 }
4889
4890 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4891 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
4892 req_len, *emb);
4893 if (alloc_len < req_len) {
4894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4895 "9000 Allocated DMA memory size (x%x) is "
4896 "less than the requested DMA memory "
4897 "size (x%x)\n", alloc_len, req_len);
4898 return -ENOMEM;
4899 }
4900 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
4901 if (unlikely(rc))
4902 return -EIO;
4903
4904 if (!phba->sli4_hba.intr_enable)
4905 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4906 else {
4907 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
4908 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4909 }
4910
4911 if (unlikely(rc))
4912 rc = -EIO;
4913 return rc;
4914}
4915
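
The embedded-versus-non-embedded choice in lpfc_sli4_cfg_post_extnts is pure arithmetic: if the requested id payload fits in the space a mailbox has left after its header and the extents-specific word, the command is embedded; otherwise the payload moves to external DMA memory and must also carry the config header. A sketch of that calculation with assumed structure sizes (the real values come from sizeof(MAILBOX_t) and friends):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed sizes standing in for sizeof(MAILBOX_t) and the
	 * headers; the driver computes these from its real structures. */
	const uint32_t mailbox_len = 256;
	const uint32_t mbox_header_len = 28;
	const uint32_t cfg_shdr_len = 16;
	uint16_t extnt_cnt = 80;

	/* Requested payload: one 16-bit id per extent. */
	uint32_t req_len = extnt_cnt * (uint32_t)sizeof(uint16_t);
	/* Space an embedded mailbox has left after its header and the
	 * extents-specific word. */
	uint32_t emb_len = mailbox_len - mbox_header_len -
			   (uint32_t)sizeof(uint32_t);
	int embedded = req_len <= emb_len;

	if (!embedded)
		/* Non-embedded requests also carry the config header
		 * and the extents word in the external buffer. */
		req_len = extnt_cnt * (uint32_t)sizeof(uint16_t) +
			  cfg_shdr_len + (uint32_t)sizeof(uint32_t);

	printf("req_len=%" PRIu32 " embedded=%d\n", req_len, embedded);
	return 0;
}
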
4916/**
4917 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
4918 * @phba: Pointer to HBA context object.
4919 * @type: The resource extent type to allocate.
4920 *
 4921 * This function allocates all extents of the specified resource type and
 4922 * provisions the bitmask and id array that manage them.
4923 **/
4924static int
4925lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
4926{
4927 bool emb = false;
4928 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
4929 uint16_t rsrc_id, rsrc_start, j, k;
4930 uint16_t *ids;
4931 int i, rc;
4932 unsigned long longs;
4933 unsigned long *bmask;
4934 struct lpfc_rsrc_blks *rsrc_blks;
4935 LPFC_MBOXQ_t *mbox;
4936 uint32_t length;
4937 struct lpfc_id_range *id_array = NULL;
4938 void *virtaddr = NULL;
4939 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
4940 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
4941 struct list_head *ext_blk_list;
4942
4943 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
4944 &rsrc_cnt,
4945 &rsrc_size);
4946 if (unlikely(rc))
4947 return -EIO;
4948
4949 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
4950 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4951 "3009 No available Resource Extents "
4952 "for resource type 0x%x: Count: 0x%x, "
4953 "Size 0x%x\n", type, rsrc_cnt,
4954 rsrc_size);
4955 return -ENOMEM;
4956 }
4957
4958 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
4959 "2903 Available Resource Extents "
4960 "for resource type 0x%x: Count: 0x%x, "
4961 "Size 0x%x\n", type, rsrc_cnt,
4962 rsrc_size);
4963
4964 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4965 if (!mbox)
4966 return -ENOMEM;
4967
4968 rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
4969 if (unlikely(rc)) {
4970 rc = -EIO;
4971 goto err_exit;
4972 }
4973
4974 /*
4975 * Figure out where the response is located. Then get local pointers
 4976 * to the response data. The port does not guarantee that it will honor
 4977 * the full requested extent count, so update the local variable with the
 4978 * count the port actually allocated.
4979 */
4980 if (emb == LPFC_SLI4_MBX_EMBED) {
4981 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
4982 id_array = &rsrc_ext->u.rsp.id[0];
4983 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
4984 } else {
4985 virtaddr = mbox->sge_array->addr[0];
4986 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
4987 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
4988 id_array = &n_rsrc->id;
4989 }
4990
4991 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4992 rsrc_id_cnt = rsrc_cnt * rsrc_size;
4993
4994 /*
4995 * Based on the resource size and count, correct the base and max
4996 * resource values.
4997 */
4998 length = sizeof(struct lpfc_rsrc_blks);
4999 switch (type) {
5000 case LPFC_RSC_TYPE_FCOE_RPI:
5001 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5002 sizeof(unsigned long),
5003 GFP_KERNEL);
5004 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5005 rc = -ENOMEM;
5006 goto err_exit;
5007 }
5008 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5009 sizeof(uint16_t),
5010 GFP_KERNEL);
5011 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5012 kfree(phba->sli4_hba.rpi_bmask);
5013 rc = -ENOMEM;
5014 goto err_exit;
5015 }
5016
5017 /*
5018 * The next_rpi was initialized with the maximum available
5019 * count but the port may allocate a smaller number. Catch
5020 * that case and update the next_rpi.
5021 */
5022 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5023
5024 /* Initialize local ptrs for common extent processing later. */
5025 bmask = phba->sli4_hba.rpi_bmask;
5026 ids = phba->sli4_hba.rpi_ids;
5027 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5028 break;
5029 case LPFC_RSC_TYPE_FCOE_VPI:
5030 phba->vpi_bmask = kzalloc(longs *
5031 sizeof(unsigned long),
5032 GFP_KERNEL);
5033 if (unlikely(!phba->vpi_bmask)) {
5034 rc = -ENOMEM;
5035 goto err_exit;
5036 }
5037 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5038 sizeof(uint16_t),
5039 GFP_KERNEL);
5040 if (unlikely(!phba->vpi_ids)) {
5041 kfree(phba->vpi_bmask);
5042 rc = -ENOMEM;
5043 goto err_exit;
5044 }
5045
5046 /* Initialize local ptrs for common extent processing later. */
5047 bmask = phba->vpi_bmask;
5048 ids = phba->vpi_ids;
5049 ext_blk_list = &phba->lpfc_vpi_blk_list;
5050 break;
5051 case LPFC_RSC_TYPE_FCOE_XRI:
5052 phba->sli4_hba.xri_bmask = kzalloc(longs *
5053 sizeof(unsigned long),
5054 GFP_KERNEL);
5055 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5056 rc = -ENOMEM;
5057 goto err_exit;
5058 }
5059 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5060 sizeof(uint16_t),
5061 GFP_KERNEL);
5062 if (unlikely(!phba->sli4_hba.xri_ids)) {
5063 kfree(phba->sli4_hba.xri_bmask);
5064 rc = -ENOMEM;
5065 goto err_exit;
5066 }
5067
5068 /* Initialize local ptrs for common extent processing later. */
5069 bmask = phba->sli4_hba.xri_bmask;
5070 ids = phba->sli4_hba.xri_ids;
5071 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5072 break;
5073 case LPFC_RSC_TYPE_FCOE_VFI:
5074 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5075 sizeof(unsigned long),
5076 GFP_KERNEL);
5077 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5078 rc = -ENOMEM;
5079 goto err_exit;
5080 }
5081 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5082 sizeof(uint16_t),
5083 GFP_KERNEL);
5084 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5085 kfree(phba->sli4_hba.vfi_bmask);
5086 rc = -ENOMEM;
5087 goto err_exit;
5088 }
5089
5090 /* Initialize local ptrs for common extent processing later. */
5091 bmask = phba->sli4_hba.vfi_bmask;
5092 ids = phba->sli4_hba.vfi_ids;
5093 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5094 break;
5095 default:
5096 /* Unsupported Opcode. Fail call. */
5097 id_array = NULL;
5098 bmask = NULL;
5099 ids = NULL;
5100 ext_blk_list = NULL;
5101 goto err_exit;
5102 }
5103
5104 /*
5105 * Complete initializing the extent configuration with the
 5106 * allocated ids assigned to this function. The bitmask tracks which
 5107 * array slots are in use and so manages the available ids. The
 5108 * array just stores the ids communicated to the port via the wqes.
5109 */
5110 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5111 if ((i % 2) == 0)
5112 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5113 &id_array[k]);
5114 else
5115 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5116 &id_array[k]);
5117
5118 rsrc_blks = kzalloc(length, GFP_KERNEL);
5119 if (unlikely(!rsrc_blks)) {
5120 rc = -ENOMEM;
5121 kfree(bmask);
5122 kfree(ids);
5123 goto err_exit;
5124 }
5125 rsrc_blks->rsrc_start = rsrc_id;
5126 rsrc_blks->rsrc_size = rsrc_size;
5127 list_add_tail(&rsrc_blks->list, ext_blk_list);
5128 rsrc_start = rsrc_id;
5129 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5130 phba->sli4_hba.scsi_xri_start = rsrc_start +
5131 lpfc_sli4_get_els_iocb_cnt(phba);
5132
5133 while (rsrc_id < (rsrc_start + rsrc_size)) {
5134 ids[j] = rsrc_id;
5135 rsrc_id++;
5136 j++;
5137 }
5138 /* Entire word processed. Get next word.*/
5139 if ((i % 2) == 1)
5140 k++;
5141 }
5142 err_exit:
5143 lpfc_sli4_mbox_cmd_free(phba, mbox);
5144 return rc;
5145}
5146
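
The provisioning loop at the end of lpfc_sli4_alloc_extent depends on one packing rule: the port returns two 16-bit extent start ids per 32-bit response word, and each start id denotes rsrc_size consecutive ids. A userspace sketch of the unpacking, with the low half assumed to hold the even-indexed id (the driver extracts the halves with bf_get):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two extent start ids packed per 32-bit word, low half first,
	 * as if returned by the port. */
	uint32_t id_array[] = { (200u << 16) | 100u, 300u };
	uint16_t rsrc_cnt = 3, rsrc_size = 4;
	uint16_t ids[16];
	int i, j = 0, k = 0;

	for (i = 0; i < rsrc_cnt; i++) {
		uint16_t start = (i % 2 == 0) ?
			(uint16_t)(id_array[k] & 0xffff) :
			(uint16_t)(id_array[k] >> 16);
		uint16_t id;

		/* Expand the extent into rsrc_size consecutive ids. */
		for (id = start; id < start + rsrc_size; id++)
			ids[j++] = id;
		if (i % 2 == 1)	/* both halves consumed: next word */
			k++;
	}

	for (i = 0; i < j; i++)
		printf("%d ", ids[i]);
	printf("\n");	/* 100 101 102 103 200 201 202 203 300 301 302 303 */
	return 0;
}
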
5147/**
 5148 * lpfc_sli4_dealloc_extent - Deallocate all extents of an SLI4 resource type.
5149 * @phba: Pointer to HBA context object.
5150 * @type: the extent's type.
5151 *
5152 * This function deallocates all extents of a particular resource type.
5153 * SLI4 does not allow for deallocating a particular extent range. It
5154 * is the caller's responsibility to release all kernel memory resources.
5155 **/
5156static int
5157lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5158{
5159 int rc;
5160 uint32_t length, mbox_tmo = 0;
5161 LPFC_MBOXQ_t *mbox;
5162 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5163 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5164
5165 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5166 if (!mbox)
5167 return -ENOMEM;
5168
5169 /*
 5170 * This function sends an embedded mailbox because it only sends the
 5171 * resource type. All extents of this type are released by the
5172 * port.
5173 */
5174 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5175 sizeof(struct lpfc_sli4_cfg_mhdr));
5176 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5177 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5178 length, LPFC_SLI4_MBX_EMBED);
5179
5180 /* Send an extents count of 0 - the dealloc doesn't use it. */
5181 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5182 LPFC_SLI4_MBX_EMBED);
5183 if (unlikely(rc)) {
5184 rc = -EIO;
5185 goto out_free_mbox;
5186 }
5187 if (!phba->sli4_hba.intr_enable)
5188 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5189 else {
5190 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox_tmo);
5191 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5192 }
5193 if (unlikely(rc)) {
5194 rc = -EIO;
5195 goto out_free_mbox;
5196 }
5197
5198 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5199 if (bf_get(lpfc_mbox_hdr_status,
5200 &dealloc_rsrc->header.cfg_shdr.response)) {
5201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5202 "2919 Failed to release resource extents "
5203 "for type %d - Status 0x%x Add'l Status 0x%x. "
5204 "Resource memory not released.\n",
5205 type,
5206 bf_get(lpfc_mbox_hdr_status,
5207 &dealloc_rsrc->header.cfg_shdr.response),
5208 bf_get(lpfc_mbox_hdr_add_status,
5209 &dealloc_rsrc->header.cfg_shdr.response));
5210 rc = -EIO;
5211 goto out_free_mbox;
5212 }
5213
5214 /* Release kernel memory resources for the specific type. */
5215 switch (type) {
5216 case LPFC_RSC_TYPE_FCOE_VPI:
5217 kfree(phba->vpi_bmask);
5218 kfree(phba->vpi_ids);
5219 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5220 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5221 &phba->lpfc_vpi_blk_list, list) {
5222 list_del_init(&rsrc_blk->list);
5223 kfree(rsrc_blk);
5224 }
5225 break;
5226 case LPFC_RSC_TYPE_FCOE_XRI:
5227 kfree(phba->sli4_hba.xri_bmask);
5228 kfree(phba->sli4_hba.xri_ids);
5229 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5230 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5231 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5232 list_del_init(&rsrc_blk->list);
5233 kfree(rsrc_blk);
5234 }
5235 break;
5236 case LPFC_RSC_TYPE_FCOE_VFI:
5237 kfree(phba->sli4_hba.vfi_bmask);
5238 kfree(phba->sli4_hba.vfi_ids);
5239 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5240 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5241 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5242 list_del_init(&rsrc_blk->list);
5243 kfree(rsrc_blk);
5244 }
5245 break;
5246 case LPFC_RSC_TYPE_FCOE_RPI:
5247 /* RPI bitmask and physical id array are cleaned up earlier. */
5248 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5249 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5250 list_del_init(&rsrc_blk->list);
5251 kfree(rsrc_blk);
5252 }
5253 break;
5254 default:
5255 break;
5256 }
5257
5258 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5259
5260 out_free_mbox:
5261 mempool_free(mbox, phba->mbox_mem_pool);
5262 return rc;
5263}
5264
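
Each case in the teardown above iterates with list_for_each_entry_safe because nodes are freed during the walk; the _safe variant caches the next pointer before the current node is released. The same pattern on a hand-rolled singly linked list, outside the kernel list API:

#include <stdio.h>
#include <stdlib.h>

struct blk {
	int start;
	struct blk *next;
};

int main(void)
{
	struct blk *head = NULL, *cur, *next;
	int i;

	/* Build a small list. */
	for (i = 0; i < 3; i++) {
		cur = malloc(sizeof(*cur));
		if (!cur)
			return 1;
		cur->start = i * 16;
		cur->next = head;
		head = cur;
	}

	/* Safe teardown: grab ->next before freeing the node, which is
	 * exactly what list_for_each_entry_safe() does for the driver. */
	for (cur = head; cur; cur = next) {
		next = cur->next;
		printf("releasing block at %d\n", cur->start);
		free(cur);
	}
	return 0;
}
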
5265/**
5266 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5267 * @phba: Pointer to HBA context object.
5268 *
5269 * This function allocates all SLI4 resource identifiers.
5270 **/
5271int
5272lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5273{
5274 int i, rc, error = 0;
5275 uint16_t count, base;
5276 unsigned long longs;
5277
5278 if (phba->sli4_hba.extents_in_use) {
5279 /*
5280 * The port supports resource extents. The XRI, VPI, VFI, RPI
5281 * resource extent count must be read and allocated before
5282 * provisioning the resource id arrays.
5283 */
5284 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5285 LPFC_IDX_RSRC_RDY) {
5286 /*
5287 * Extent-based resources are set - the driver could
5288 * be in a port reset. Figure out if any corrective
5289 * actions need to be taken.
5290 */
5291 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5292 LPFC_RSC_TYPE_FCOE_VFI);
5293 if (rc != 0)
5294 error++;
5295 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5296 LPFC_RSC_TYPE_FCOE_VPI);
5297 if (rc != 0)
5298 error++;
5299 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5300 LPFC_RSC_TYPE_FCOE_XRI);
5301 if (rc != 0)
5302 error++;
5303 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5304 LPFC_RSC_TYPE_FCOE_RPI);
5305 if (rc != 0)
5306 error++;
5307
5308 /*
5309 * It's possible that the number of resources
5310 * provided to this port instance changed between
5311 * resets. Detect this condition and reallocate
5312 * resources. Otherwise, there is no action.
5313 */
5314 if (error) {
5315 lpfc_printf_log(phba, KERN_INFO,
5316 LOG_MBOX | LOG_INIT,
5317 "2931 Detected extent resource "
5318 "change. Reallocating all "
5319 "extents.\n");
5320 rc = lpfc_sli4_dealloc_extent(phba,
5321 LPFC_RSC_TYPE_FCOE_VFI);
5322 rc = lpfc_sli4_dealloc_extent(phba,
5323 LPFC_RSC_TYPE_FCOE_VPI);
5324 rc = lpfc_sli4_dealloc_extent(phba,
5325 LPFC_RSC_TYPE_FCOE_XRI);
5326 rc = lpfc_sli4_dealloc_extent(phba,
5327 LPFC_RSC_TYPE_FCOE_RPI);
5328 } else
5329 return 0;
5330 }
5331
5332 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5333 if (unlikely(rc))
5334 goto err_exit;
5335
5336 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5337 if (unlikely(rc))
5338 goto err_exit;
5339
5340 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5341 if (unlikely(rc))
5342 goto err_exit;
5343
5344 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5345 if (unlikely(rc))
5346 goto err_exit;
5347 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5348 LPFC_IDX_RSRC_RDY);
5349 return rc;
5350 } else {
5351 /*
5352 * The port does not support resource extents. The XRI, VPI,
5353 * VFI, RPI resource ids were determined from READ_CONFIG.
5354 * Just allocate the bitmasks and provision the resource id
5355 * arrays. If a port reset is active, the resources don't
5356 * need any action - just exit.
5357 */
5358 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5359 LPFC_IDX_RSRC_RDY)
5360 return 0;
5361
5362 /* RPIs. */
5363 count = phba->sli4_hba.max_cfg_param.max_rpi;
5364 base = phba->sli4_hba.max_cfg_param.rpi_base;
5365 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5366 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5367 sizeof(unsigned long),
5368 GFP_KERNEL);
5369 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5370 rc = -ENOMEM;
5371 goto err_exit;
5372 }
5373 phba->sli4_hba.rpi_ids = kzalloc(count *
5374 sizeof(uint16_t),
5375 GFP_KERNEL);
5376 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5377 rc = -ENOMEM;
5378 goto free_rpi_bmask;
5379 }
5380
5381 for (i = 0; i < count; i++)
5382 phba->sli4_hba.rpi_ids[i] = base + i;
5383
5384 /* VPIs. */
5385 count = phba->sli4_hba.max_cfg_param.max_vpi;
5386 base = phba->sli4_hba.max_cfg_param.vpi_base;
5387 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5388 phba->vpi_bmask = kzalloc(longs *
5389 sizeof(unsigned long),
5390 GFP_KERNEL);
5391 if (unlikely(!phba->vpi_bmask)) {
5392 rc = -ENOMEM;
5393 goto free_rpi_ids;
5394 }
5395 phba->vpi_ids = kzalloc(count *
5396 sizeof(uint16_t),
5397 GFP_KERNEL);
5398 if (unlikely(!phba->vpi_ids)) {
5399 rc = -ENOMEM;
5400 goto free_vpi_bmask;
5401 }
5402
5403 for (i = 0; i < count; i++)
5404 phba->vpi_ids[i] = base + i;
5405
5406 /* XRIs. */
5407 count = phba->sli4_hba.max_cfg_param.max_xri;
5408 base = phba->sli4_hba.max_cfg_param.xri_base;
5409 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5410 phba->sli4_hba.xri_bmask = kzalloc(longs *
5411 sizeof(unsigned long),
5412 GFP_KERNEL);
5413 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5414 rc = -ENOMEM;
5415 goto free_vpi_ids;
5416 }
5417 phba->sli4_hba.xri_ids = kzalloc(count *
5418 sizeof(uint16_t),
5419 GFP_KERNEL);
5420 if (unlikely(!phba->sli4_hba.xri_ids)) {
5421 rc = -ENOMEM;
5422 goto free_xri_bmask;
5423 }
5424
5425 for (i = 0; i < count; i++)
5426 phba->sli4_hba.xri_ids[i] = base + i;
5427
5428 /* VFIs. */
5429 count = phba->sli4_hba.max_cfg_param.max_vfi;
5430 base = phba->sli4_hba.max_cfg_param.vfi_base;
5431 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5432 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5433 sizeof(unsigned long),
5434 GFP_KERNEL);
5435 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5436 rc = -ENOMEM;
5437 goto free_xri_ids;
5438 }
5439 phba->sli4_hba.vfi_ids = kzalloc(count *
5440 sizeof(uint16_t),
5441 GFP_KERNEL);
5442 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5443 rc = -ENOMEM;
5444 goto free_vfi_bmask;
5445 }
5446
5447 for (i = 0; i < count; i++)
5448 phba->sli4_hba.vfi_ids[i] = base + i;
5449
5450 /*
5451 * Mark all resources ready. An HBA reset doesn't need
 5452 * to repeat this initialization.
5453 */
5454 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5455 LPFC_IDX_RSRC_RDY);
5456 return 0;
5457 }
5458
5459 free_vfi_bmask:
5460 kfree(phba->sli4_hba.vfi_bmask);
5461 free_xri_ids:
5462 kfree(phba->sli4_hba.xri_ids);
5463 free_xri_bmask:
5464 kfree(phba->sli4_hba.xri_bmask);
5465 free_vpi_ids:
5466 kfree(phba->vpi_ids);
5467 free_vpi_bmask:
5468 kfree(phba->vpi_bmask);
5469 free_rpi_ids:
5470 kfree(phba->sli4_hba.rpi_ids);
5471 free_rpi_bmask:
5472 kfree(phba->sli4_hba.rpi_bmask);
5473 err_exit:
5474 return rc;
5475}
5476
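
In the non-extent branch above, each resource follows one recipe: round the id count up to whole longs for the bitmask, then fill the id array with base + i so logical index i maps to a contiguous physical range. A compact sketch with calloc standing in for kzalloc and made-up READ_CONFIG values:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

int main(void)
{
	unsigned short count = 70, base = 512;	/* assumed READ_CONFIG values */
	unsigned long longs, *bmask;
	unsigned short *ids;
	int i;

	/* Round up so 'count' bits fit in whole unsigned longs. */
	longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	bmask = calloc(longs, sizeof(unsigned long));
	ids = calloc(count, sizeof(unsigned short));
	if (!bmask || !ids)
		return 1;

	/* Non-extent ports hand out one contiguous physical range. */
	for (i = 0; i < count; i++)
		ids[i] = base + i;

	printf("%lu longs for %d bits; ids %d..%d\n",
	       longs, count, ids[0], ids[count - 1]);
	free(bmask);
	free(ids);
	return 0;
}
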
5477/**
5478 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5479 * @phba: Pointer to HBA context object.
5480 *
 5481 * This function releases all SLI4 resource identifiers allocated by the
 5482 * driver, along with the memory that tracks them.
5483 **/
5484int
5485lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5486{
5487 if (phba->sli4_hba.extents_in_use) {
5488 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5489 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5490 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5491 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5492 } else {
5493 kfree(phba->vpi_bmask);
5494 kfree(phba->vpi_ids);
5495 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5496 kfree(phba->sli4_hba.xri_bmask);
5497 kfree(phba->sli4_hba.xri_ids);
5498 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5499 kfree(phba->sli4_hba.vfi_bmask);
5500 kfree(phba->sli4_hba.vfi_ids);
5501 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5502 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5503 }
5504
5505 return 0;
5506}
5507
5508/**
4690 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 5509
4691 * @phba: Pointer to HBA context object. 5510 * @phba: Pointer to HBA context object.
4692 * 5511 *
@@ -4708,10 +5527,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4708 struct lpfc_vport *vport = phba->pport; 5527 struct lpfc_vport *vport = phba->pport;
4709 struct lpfc_dmabuf *mp; 5528 struct lpfc_dmabuf *mp;
4710 5529
4711 /*
4712 * TODO: Why does this routine execute these task in a different
4713 * order from probe?
4714 */
4715 /* Perform a PCI function reset to start from clean */ 5530 /* Perform a PCI function reset to start from clean */
4716 rc = lpfc_pci_function_reset(phba); 5531 rc = lpfc_pci_function_reset(phba);
4717 if (unlikely(rc)) 5532 if (unlikely(rc))
@@ -4740,7 +5555,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4740 * to read FCoE param config regions 5555 * to read FCoE param config regions
4741 */ 5556 */
4742 if (lpfc_sli4_read_fcoe_params(phba, mboxq)) 5557 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4743 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5558 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
4744 "2570 Failed to read FCoE parameters\n"); 5559 "2570 Failed to read FCoE parameters\n");
4745 5560
4746 /* Issue READ_REV to collect vpd and FW information. */ 5561 /* Issue READ_REV to collect vpd and FW information. */
@@ -4873,6 +5688,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4873 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 5688 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4874 spin_unlock_irq(&phba->hbalock); 5689 spin_unlock_irq(&phba->hbalock);
4875 5690
5691 /*
 5692 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
 5693 * calls depend on these resources to complete port setup.
5694 */
5695 rc = lpfc_sli4_alloc_resource_identifiers(phba);
5696 if (rc) {
5697 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5698 "2920 Failed to alloc Resource IDs "
5699 "rc = x%x\n", rc);
5700 goto out_free_mbox;
5701 }
5702
4876 /* Read the port's service parameters. */ 5703 /* Read the port's service parameters. */
4877 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 5704 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
4878 if (rc) { 5705 if (rc) {
@@ -4906,35 +5733,37 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4906 goto out_free_mbox; 5733 goto out_free_mbox;
4907 } 5734 }
4908 5735
4909 if (phba->cfg_soft_wwnn) 5736 lpfc_update_vport_wwn(vport);
4910 u64_to_wwn(phba->cfg_soft_wwnn,
4911 vport->fc_sparam.nodeName.u.wwn);
4912 if (phba->cfg_soft_wwpn)
4913 u64_to_wwn(phba->cfg_soft_wwpn,
4914 vport->fc_sparam.portName.u.wwn);
4915 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4916 sizeof(struct lpfc_name));
4917 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4918 sizeof(struct lpfc_name));
4919 5737
4920 /* Update the fc_host data structures with new wwn. */ 5738 /* Update the fc_host data structures with new wwn. */
4921 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 5739 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4922 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 5740 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4923 5741
4924 /* Register SGL pool to the device using non-embedded mailbox command */ 5742 /* Register SGL pool to the device using non-embedded mailbox command */
4925 rc = lpfc_sli4_post_sgl_list(phba); 5743 if (!phba->sli4_hba.extents_in_use) {
4926 if (unlikely(rc)) { 5744 rc = lpfc_sli4_post_els_sgl_list(phba);
4927 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5745 if (unlikely(rc)) {
4928 "0582 Error %d during sgl post operation\n", 5746 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4929 rc); 5747 "0582 Error %d during els sgl post "
4930 rc = -ENODEV; 5748 "operation\n", rc);
4931 goto out_free_mbox; 5749 rc = -ENODEV;
5750 goto out_free_mbox;
5751 }
5752 } else {
5753 rc = lpfc_sli4_post_els_sgl_list_ext(phba);
5754 if (unlikely(rc)) {
5755 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5756 "2560 Error %d during els sgl post "
5757 "operation\n", rc);
5758 rc = -ENODEV;
5759 goto out_free_mbox;
5760 }
4932 } 5761 }
4933 5762
4934 /* Register SCSI SGL pool to the device */ 5763 /* Register SCSI SGL pool to the device */
4935 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 5764 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4936 if (unlikely(rc)) { 5765 if (unlikely(rc)) {
4937 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 5766 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4938 "0383 Error %d during scsi sgl post " 5767 "0383 Error %d during scsi sgl post "
4939 "operation\n", rc); 5768 "operation\n", rc);
4940 /* Some Scsi buffers were moved to the abort scsi list */ 5769 /* Some Scsi buffers were moved to the abort scsi list */
@@ -5747,10 +6576,15 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5747 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 6576 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5748 sizeof(struct lpfc_mcqe)); 6577 sizeof(struct lpfc_mcqe));
5749 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 6578 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5750 6579 /*
5751 /* Prefix the mailbox status with range x4000 to note SLI4 status. */ 6580 * When the CQE status indicates a failure and the mailbox status
6581 * indicates success then copy the CQE status into the mailbox status
6582 * (and prefix it with x4000).
6583 */
5752 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 6584 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5753 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); 6585 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
6586 bf_set(lpfc_mqe_status, mb,
6587 (LPFC_MBX_ERROR_RANGE | mcqe_status));
5754 rc = MBXERR_ERROR; 6588 rc = MBXERR_ERROR;
5755 } else 6589 } else
5756 lpfc_sli4_swap_str(phba, mboxq); 6590 lpfc_sli4_swap_str(phba, mboxq);
@@ -5819,7 +6653,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5819 else 6653 else
5820 rc = -EIO; 6654 rc = -EIO;
5821 if (rc != MBX_SUCCESS) 6655 if (rc != MBX_SUCCESS)
5822 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6656 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5823 "(%d):2541 Mailbox command x%x " 6657 "(%d):2541 Mailbox command x%x "
5824 "(x%x) cannot issue Data: x%x x%x\n", 6658 "(x%x) cannot issue Data: x%x x%x\n",
5825 mboxq->vport ? mboxq->vport->vpi : 0, 6659 mboxq->vport ? mboxq->vport->vpi : 0,
@@ -6307,6 +7141,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
6307 sgl->addr_hi = bpl->addrHigh; 7141 sgl->addr_hi = bpl->addrHigh;
6308 sgl->addr_lo = bpl->addrLow; 7142 sgl->addr_lo = bpl->addrLow;
6309 7143
7144 sgl->word2 = le32_to_cpu(sgl->word2);
6310 if ((i+1) == numBdes) 7145 if ((i+1) == numBdes)
6311 bf_set(lpfc_sli4_sge_last, sgl, 1); 7146 bf_set(lpfc_sli4_sge_last, sgl, 1);
6312 else 7147 else
@@ -6343,6 +7178,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
6343 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 7178 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
6344 sgl->addr_lo = 7179 sgl->addr_lo =
6345 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 7180 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
7181 sgl->word2 = le32_to_cpu(sgl->word2);
6346 bf_set(lpfc_sli4_sge_last, sgl, 1); 7182 bf_set(lpfc_sli4_sge_last, sgl, 1);
6347 sgl->word2 = cpu_to_le32(sgl->word2); 7183 sgl->word2 = cpu_to_le32(sgl->word2);
6348 sgl->sge_len = 7184 sgl->sge_len =
@@ -6474,7 +7310,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6474 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 7310 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
6475 >> LPFC_FIP_ELS_ID_SHIFT); 7311 >> LPFC_FIP_ELS_ID_SHIFT);
6476 } 7312 }
6477 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi); 7313 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
7314 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
6478 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 7315 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
6479 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 7316 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
6480 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 7317 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
@@ -6623,14 +7460,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6623 iocbq->iocb.ulpContext); 7460 iocbq->iocb.ulpContext);
6624 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 7461 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
6625 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7462 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
6626 iocbq->vport->vpi + phba->vpi_base); 7463 phba->vpi_ids[iocbq->vport->vpi]);
6627 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 7464 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
6628 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 7465 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
6629 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 7466 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
6630 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 7467 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
6631 LPFC_WQE_LENLOC_WORD3); 7468 LPFC_WQE_LENLOC_WORD3);
6632 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 7469 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
6633 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi); 7470 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7471 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
6634 command_type = OTHER_COMMAND; 7472 command_type = OTHER_COMMAND;
6635 break; 7473 break;
6636 case CMD_CLOSE_XRI_CN: 7474 case CMD_CLOSE_XRI_CN:
@@ -6729,6 +7567,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6729 return IOCB_ERROR; 7567 return IOCB_ERROR;
6730 break; 7568 break;
6731 } 7569 }
7570
6732 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 7571 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
6733 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 7572 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
6734 wqe->generic.wqe_com.abort_tag = abort_tag; 7573 wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -6776,7 +7615,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6776 return IOCB_BUSY; 7615 return IOCB_BUSY;
6777 } 7616 }
6778 } else { 7617 } else {
6779 sglq = __lpfc_sli_get_sglq(phba, piocb); 7618 sglq = __lpfc_sli_get_sglq(phba, piocb);
6780 if (!sglq) { 7619 if (!sglq) {
6781 if (!(flag & SLI_IOCB_RET_IOCB)) { 7620 if (!(flag & SLI_IOCB_RET_IOCB)) {
6782 __lpfc_sli_ringtx_put(phba, 7621 __lpfc_sli_ringtx_put(phba,
@@ -6789,11 +7628,11 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6789 } 7628 }
6790 } 7629 }
6791 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 7630 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6792 sglq = NULL; /* These IOs already have an XRI and 7631 /* These IOs already have an XRI and a mapped sgl. */
6793 * a mapped sgl. 7632 sglq = NULL;
6794 */
6795 } else { 7633 } else {
6796 /* This is a continuation of a command (CX), so this 7634 /*
 7635 * This is a continuation of a command (CX), so this
6797 * sglq is on the active list 7636 * sglq is on the active list
6798 */ 7637 */
6799 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); 7638 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
@@ -6802,8 +7641,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6802 } 7641 }
6803 7642
6804 if (sglq) { 7643 if (sglq) {
7644 piocb->sli4_lxritag = sglq->sli4_lxritag;
6805 piocb->sli4_xritag = sglq->sli4_xritag; 7645 piocb->sli4_xritag = sglq->sli4_xritag;
6806
6807 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 7646 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
6808 return IOCB_ERROR; 7647 return IOCB_ERROR;
6809 } 7648 }
@@ -9799,7 +10638,12 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
9799 break; 10638 break;
9800 case LPFC_WCQ: 10639 case LPFC_WCQ:
9801 while ((cqe = lpfc_sli4_cq_get(cq))) { 10640 while ((cqe = lpfc_sli4_cq_get(cq))) {
9802 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe); 10641 if (cq->subtype == LPFC_FCP)
10642 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
10643 cqe);
10644 else
10645 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
10646 cqe);
9803 if (!(++ecount % LPFC_GET_QE_REL_INT)) 10647 if (!(++ecount % LPFC_GET_QE_REL_INT))
9804 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 10648 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
9805 } 10649 }
@@ -11446,6 +12290,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11446 LPFC_MBOXQ_t *mbox; 12290 LPFC_MBOXQ_t *mbox;
11447 int rc; 12291 int rc;
11448 uint32_t shdr_status, shdr_add_status; 12292 uint32_t shdr_status, shdr_add_status;
12293 uint32_t mbox_tmo;
11449 union lpfc_sli4_cfg_shdr *shdr; 12294 union lpfc_sli4_cfg_shdr *shdr;
11450 12295
11451 if (xritag == NO_XRI) { 12296 if (xritag == NO_XRI) {
@@ -11479,8 +12324,10 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11479 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 12324 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
11480 if (!phba->sli4_hba.intr_enable) 12325 if (!phba->sli4_hba.intr_enable)
11481 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12326 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11482 else 12327 else {
11483 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 12328 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12329 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12330 }
11484 /* The IOCTL status is embedded in the mailbox subheader. */ 12331 /* The IOCTL status is embedded in the mailbox subheader. */
11485 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 12332 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
11486 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12333 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -11498,6 +12345,76 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11498} 12345}
11499 12346
11500/** 12347/**
 12348 * lpfc_sli4_alloc_xri - Allocate an available logical xri
 12349 * @phba: pointer to lpfc hba data structure.
 12350 *
 12351 * This routine is invoked to allocate the next free logical xri from
 12352 * the driver's xri bitmask. The index returned is logical and zero
 12353 * based; callers translate it to a physical id through
 12354 * phba->sli4_hba.xri_ids[] before presenting it to the port. The
 12355 * routine takes the hbalock internally, so no lock is held on entry.
 12356 *
 12357 * Return codes
 12358 * NO_XRI - the xri pool is exhausted
 12359 * xri - the allocated logical xri otherwise.
12360 */
12361uint16_t
12362lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
12363{
12364 unsigned long xri;
12365
12366 /*
12367 * Fetch the next logical xri. Because this index is logical,
12368 * the driver starts at 0 each time.
12369 */
12370 spin_lock_irq(&phba->hbalock);
12371 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
12372 phba->sli4_hba.max_cfg_param.max_xri, 0);
12373 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
12374 spin_unlock_irq(&phba->hbalock);
12375 return NO_XRI;
12376 } else {
12377 set_bit(xri, phba->sli4_hba.xri_bmask);
12378 phba->sli4_hba.max_cfg_param.xri_used++;
12379 phba->sli4_hba.xri_count++;
12380 }
12381
12382 spin_unlock_irq(&phba->hbalock);
12383 return xri;
12384}
12385
12386/**
 12387 * __lpfc_sli4_free_xri - Release an xri for reuse.
 12388 * @phba: pointer to lpfc hba data structure.
 12389 * @xri: logical xri to release.
 12390 *
 12391 * Releases an xri to the pool of available xris; hbalock held by caller.
12392 **/
12393void
12394__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
12395{
12396 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
12397 phba->sli4_hba.xri_count--;
12398 phba->sli4_hba.max_cfg_param.xri_used--;
12399 }
12400}
12401
12402/**
 12403 * lpfc_sli4_free_xri - Release an xri for reuse.
 12404 * @phba: pointer to lpfc hba data structure.
 12405 * @xri: logical xri to release.
 12406 *
 12407 * Releases an xri to the pool of available xris; takes the hbalock.
12408 **/
12409void
12410lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
12411{
12412 spin_lock_irq(&phba->hbalock);
12413 __lpfc_sli4_free_xri(phba, xri);
12414 spin_unlock_irq(&phba->hbalock);
12415}
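
Taken together, the routines above implement the logical-to-physical XRI indirection that extent support introduces. A minimal usage sketch, assuming only the routines shown here and the phba->sli4_hba.xri_ids[] translation table (the helper name is hypothetical, not driver code):

	static int example_xri_cycle(struct lpfc_hba *phba)
	{
		uint16_t lxri, xri;

		/* Allocate a zero-based logical xri from the driver's bitmask. */
		lxri = lpfc_sli4_alloc_xri(phba);
		if (lxri == NO_XRI)
			return -ENOMEM;

		/* The port only understands physical ids; translate before use. */
		xri = phba->sli4_hba.xri_ids[lxri];

		/* ... build a WQE or post an sgl using 'xri' ... */
		(void)xri;

		/* Release by logical id; this wrapper takes the hbalock itself. */
		lpfc_sli4_free_xri(phba, lxri);
		return 0;
	}
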
12416
12417/**
11501 * lpfc_sli4_next_xritag - Get an xritag for the io 12418 * lpfc_sli4_next_xritag - Get an xritag for the io
11502 * @phba: Pointer to HBA context object. 12419 * @phba: Pointer to HBA context object.
11503 * 12420 *
@@ -11510,30 +12427,23 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
11510uint16_t 12427uint16_t
11511lpfc_sli4_next_xritag(struct lpfc_hba *phba) 12428lpfc_sli4_next_xritag(struct lpfc_hba *phba)
11512{ 12429{
11513 uint16_t xritag; 12430 uint16_t xri_index;
11514 12431
11515 spin_lock_irq(&phba->hbalock); 12432 xri_index = lpfc_sli4_alloc_xri(phba);
11516 xritag = phba->sli4_hba.next_xri; 12433 if (xri_index != NO_XRI)
11517 if ((xritag != (uint16_t) -1) && xritag < 12434 return xri_index;
11518 (phba->sli4_hba.max_cfg_param.max_xri 12435
11519 + phba->sli4_hba.max_cfg_param.xri_base)) { 12436 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11520 phba->sli4_hba.next_xri++;
11521 phba->sli4_hba.max_cfg_param.xri_used++;
11522 spin_unlock_irq(&phba->hbalock);
11523 return xritag;
11524 }
11525 spin_unlock_irq(&phba->hbalock);
11526 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11527 "2004 Failed to allocate XRI. Last XRITAG is %d" 12437 "2004 Failed to allocate XRI. Last XRITAG is %d"
11528 " Max XRI is %d, Used XRI is %d\n", 12438 " Max XRI is %d, Used XRI is %d\n",
11529 phba->sli4_hba.next_xri, 12439 xri_index,
11530 phba->sli4_hba.max_cfg_param.max_xri, 12440 phba->sli4_hba.max_cfg_param.max_xri,
11531 phba->sli4_hba.max_cfg_param.xri_used); 12441 phba->sli4_hba.max_cfg_param.xri_used);
11532 return -1; 12442 return NO_XRI;
11533} 12443}
11534 12444
11535/** 12445/**
11536 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware. 12446 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
11537 * @phba: pointer to lpfc hba data structure. 12447 * @phba: pointer to lpfc hba data structure.
11538 * 12448 *
11539 * This routine is invoked to post a block of driver's sgl pages to the 12449 * This routine is invoked to post a block of driver's sgl pages to the
@@ -11542,7 +12452,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
11542 * stopped. 12452 * stopped.
11543 **/ 12453 **/
11544int 12454int
11545lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) 12455lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
11546{ 12456{
11547 struct lpfc_sglq *sglq_entry; 12457 struct lpfc_sglq *sglq_entry;
11548 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 12458 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -11551,7 +12461,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11551 LPFC_MBOXQ_t *mbox; 12461 LPFC_MBOXQ_t *mbox;
11552 uint32_t reqlen, alloclen, pg_pairs; 12462 uint32_t reqlen, alloclen, pg_pairs;
11553 uint32_t mbox_tmo; 12463 uint32_t mbox_tmo;
11554 uint16_t xritag_start = 0; 12464 uint16_t xritag_start = 0, lxri = 0;
11555 int els_xri_cnt, rc = 0; 12465 int els_xri_cnt, rc = 0;
11556 uint32_t shdr_status, shdr_add_status; 12466 uint32_t shdr_status, shdr_add_status;
11557 union lpfc_sli4_cfg_shdr *shdr; 12467 union lpfc_sli4_cfg_shdr *shdr;
@@ -11568,11 +12478,8 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11568 return -ENOMEM; 12478 return -ENOMEM;
11569 } 12479 }
11570 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12480 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11571 if (!mbox) { 12481 if (!mbox)
11572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11573 "2560 Failed to allocate mbox cmd memory\n");
11574 return -ENOMEM; 12482 return -ENOMEM;
11575 }
11576 12483
11577 /* Allocate DMA memory and set up the non-embedded mailbox command */ 12484 /* Allocate DMA memory and set up the non-embedded mailbox command */
11578 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12485 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -11587,15 +12494,30 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11587 lpfc_sli4_mbox_cmd_free(phba, mbox); 12494 lpfc_sli4_mbox_cmd_free(phba, mbox);
11588 return -ENOMEM; 12495 return -ENOMEM;
11589 } 12496 }
11590 /* Get the first SGE entry from the non-embedded DMA memory */
11591 viraddr = mbox->sge_array->addr[0];
11592
11593 /* Set up the SGL pages in the non-embedded DMA pages */ 12497 /* Set up the SGL pages in the non-embedded DMA pages */
12498 viraddr = mbox->sge_array->addr[0];
11594 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 12499 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
11595 sgl_pg_pairs = &sgl->sgl_pg_pairs; 12500 sgl_pg_pairs = &sgl->sgl_pg_pairs;
11596 12501
11597 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 12502 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
11598 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 12503 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
12504
12505 /*
12506 * Assign the sglq a physical xri only if the driver has not
12507 * initialized those resources. A port reset only needs
12508 * the sglq's posted.
12509 */
12510 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
12511 LPFC_XRI_RSRC_RDY) {
12512 lxri = lpfc_sli4_next_xritag(phba);
12513 if (lxri == NO_XRI) {
12514 lpfc_sli4_mbox_cmd_free(phba, mbox);
12515 return -ENOMEM;
12516 }
12517 sglq_entry->sli4_lxritag = lxri;
12518 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
12519 }
12520
11599 /* Set up the sge entry */ 12521 /* Set up the sge entry */
11600 sgl_pg_pairs->sgl_pg0_addr_lo = 12522 sgl_pg_pairs->sgl_pg0_addr_lo =
11601 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 12523 cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -11605,16 +12527,17 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11605 cpu_to_le32(putPaddrLow(0)); 12527 cpu_to_le32(putPaddrLow(0));
11606 sgl_pg_pairs->sgl_pg1_addr_hi = 12528 sgl_pg_pairs->sgl_pg1_addr_hi =
11607 cpu_to_le32(putPaddrHigh(0)); 12529 cpu_to_le32(putPaddrHigh(0));
12530
11608 /* Keep the first xritag on the list */ 12531 /* Keep the first xritag on the list */
11609 if (pg_pairs == 0) 12532 if (pg_pairs == 0)
11610 xritag_start = sglq_entry->sli4_xritag; 12533 xritag_start = sglq_entry->sli4_xritag;
11611 sgl_pg_pairs++; 12534 sgl_pg_pairs++;
11612 } 12535 }
12536
12537 /* Complete initialization and perform endian conversion. */
11613 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 12538 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
11614 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 12539 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
11615 /* Perform endian conversion if necessary */
11616 sgl->word0 = cpu_to_le32(sgl->word0); 12540 sgl->word0 = cpu_to_le32(sgl->word0);
11617
11618 if (!phba->sli4_hba.intr_enable) 12541 if (!phba->sli4_hba.intr_enable)
11619 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12542 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11620 else { 12543 else {
@@ -11633,6 +12556,181 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
11633 shdr_status, shdr_add_status, rc); 12556 shdr_status, shdr_add_status, rc);
11634 rc = -ENXIO; 12557 rc = -ENXIO;
11635 } 12558 }
12559
12560 if (rc == 0)
12561 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
12562 LPFC_XRI_RSRC_RDY);
12563 return rc;
12564}
12565
12566/**
12567 * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
12568 * @phba: pointer to lpfc hba data structure.
12569 *
 12570 * This routine is invoked to post a block of the driver's sgl pages to the
 12571 * HBA using a non-embedded mailbox command. No lock is held. This routine
12572 * is only called when the driver is loading and after all IO has been
12573 * stopped.
12574 **/
12575int
12576lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
12577{
12578 struct lpfc_sglq *sglq_entry;
12579 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
12580 struct sgl_page_pairs *sgl_pg_pairs;
12581 void *viraddr;
12582 LPFC_MBOXQ_t *mbox;
12583 uint32_t reqlen, alloclen, index;
12584 uint32_t mbox_tmo;
12585 uint16_t rsrc_start, rsrc_size, els_xri_cnt;
12586 uint16_t xritag_start = 0, lxri = 0;
12587 struct lpfc_rsrc_blks *rsrc_blk;
12588 int cnt, ttl_cnt, rc = 0;
12589 int loop_cnt;
12590 uint32_t shdr_status, shdr_add_status;
12591 union lpfc_sli4_cfg_shdr *shdr;
12592
12593 /* The number of sgls to be posted */
12594 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
12595
12596 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
12597 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12598 if (reqlen > SLI4_PAGE_SIZE) {
12599 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12600 "2989 Block sgl registration required DMA "
 12601 "size (%d) greater than a page\n", reqlen);
12602 return -ENOMEM;
12603 }
12604
12605 cnt = 0;
12606 ttl_cnt = 0;
12607 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
12608 list) {
12609 rsrc_start = rsrc_blk->rsrc_start;
12610 rsrc_size = rsrc_blk->rsrc_size;
12611
12612 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12613 "3014 Working ELS Extent start %d, cnt %d\n",
12614 rsrc_start, rsrc_size);
12615
12616 loop_cnt = min(els_xri_cnt, rsrc_size);
12617 if (ttl_cnt + loop_cnt >= els_xri_cnt) {
12618 loop_cnt = els_xri_cnt - ttl_cnt;
12619 ttl_cnt = els_xri_cnt;
12620 }
12621
12622 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12623 if (!mbox)
12624 return -ENOMEM;
12625 /*
12626 * Allocate DMA memory and set up the non-embedded mailbox
12627 * command.
12628 */
12629 alloclen = lpfc_sli4_config(phba, mbox,
12630 LPFC_MBOX_SUBSYSTEM_FCOE,
12631 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
12632 reqlen, LPFC_SLI4_MBX_NEMBED);
12633 if (alloclen < reqlen) {
12634 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12635 "2987 Allocated DMA memory size (%d) "
12636 "is less than the requested DMA memory "
12637 "size (%d)\n", alloclen, reqlen);
12638 lpfc_sli4_mbox_cmd_free(phba, mbox);
12639 return -ENOMEM;
12640 }
12641
12642 /* Set up the SGL pages in the non-embedded DMA pages */
12643 viraddr = mbox->sge_array->addr[0];
12644 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
12645 sgl_pg_pairs = &sgl->sgl_pg_pairs;
12646
12647 /*
12648 * The starting resource may not begin at zero. Control
 12649 * the loop variables via the block resource parameters,
12650 * but handle the sge pointers with a zero-based index
12651 * that doesn't get reset per loop pass.
12652 */
12653 for (index = rsrc_start;
12654 index < rsrc_start + loop_cnt;
12655 index++) {
12656 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
12657
12658 /*
12659 * Assign the sglq a physical xri only if the driver
12660 * has not initialized those resources. A port reset
12661 * only needs the sglq's posted.
12662 */
12663 if (bf_get(lpfc_xri_rsrc_rdy,
12664 &phba->sli4_hba.sli4_flags) !=
12665 LPFC_XRI_RSRC_RDY) {
12666 lxri = lpfc_sli4_next_xritag(phba);
12667 if (lxri == NO_XRI) {
12668 lpfc_sli4_mbox_cmd_free(phba, mbox);
12669 rc = -ENOMEM;
12670 goto err_exit;
12671 }
12672 sglq_entry->sli4_lxritag = lxri;
12673 sglq_entry->sli4_xritag =
12674 phba->sli4_hba.xri_ids[lxri];
12675 }
12676
12677 /* Set up the sge entry */
12678 sgl_pg_pairs->sgl_pg0_addr_lo =
12679 cpu_to_le32(putPaddrLow(sglq_entry->phys));
12680 sgl_pg_pairs->sgl_pg0_addr_hi =
12681 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
12682 sgl_pg_pairs->sgl_pg1_addr_lo =
12683 cpu_to_le32(putPaddrLow(0));
12684 sgl_pg_pairs->sgl_pg1_addr_hi =
12685 cpu_to_le32(putPaddrHigh(0));
12686
12687 /* Track the starting physical XRI for the mailbox. */
12688 if (index == rsrc_start)
12689 xritag_start = sglq_entry->sli4_xritag;
12690 sgl_pg_pairs++;
12691 cnt++;
12692 }
12693
12694 /* Complete initialization and perform endian conversion. */
12695 rsrc_blk->rsrc_used += loop_cnt;
12696 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
12697 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
12698 sgl->word0 = cpu_to_le32(sgl->word0);
12699
12700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12701 "3015 Post ELS Extent SGL, start %d, "
12702 "cnt %d, used %d\n",
12703 xritag_start, loop_cnt, rsrc_blk->rsrc_used);
12704 if (!phba->sli4_hba.intr_enable)
12705 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12706 else {
12707 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12708 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12709 }
12710 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
12711 shdr_status = bf_get(lpfc_mbox_hdr_status,
12712 &shdr->response);
12713 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12714 &shdr->response);
12715 if (rc != MBX_TIMEOUT)
12716 lpfc_sli4_mbox_cmd_free(phba, mbox);
12717 if (shdr_status || shdr_add_status || rc) {
12718 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12719 "2988 POST_SGL_BLOCK mailbox "
12720 "command failed status x%x "
12721 "add_status x%x mbx status x%x\n",
12722 shdr_status, shdr_add_status, rc);
12723 rc = -ENXIO;
12724 goto err_exit;
12725 }
12726 if (ttl_cnt >= els_xri_cnt)
12727 break;
12728 }
12729
12730 err_exit:
12731 if (rc == 0)
12732 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
12733 LPFC_XRI_RSRC_RDY);
11636 return rc; 12734 return rc;
11637} 12735}
11638 12736
@@ -11693,6 +12791,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
11693 lpfc_sli4_mbox_cmd_free(phba, mbox); 12791 lpfc_sli4_mbox_cmd_free(phba, mbox);
11694 return -ENOMEM; 12792 return -ENOMEM;
11695 } 12793 }
12794
11696 /* Get the first SGE entry from the non-embedded DMA memory */ 12795 /* Get the first SGE entry from the non-embedded DMA memory */
11697 viraddr = mbox->sge_array->addr[0]; 12796 viraddr = mbox->sge_array->addr[0];
11698 12797
@@ -11748,6 +12847,169 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
11748} 12847}
11749 12848
11750/** 12849/**
12850 * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
12851 * @phba: pointer to lpfc hba data structure.
12852 * @sblist: pointer to scsi buffer list.
 12853 * @cnt: number of scsi buffers on the list.
 12854 *
 12855 * This routine is invoked to post a block of @cnt scsi sgl pages from a
 12856 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
 12857 * No lock is held.
12858 *
12859 **/
12860int
12861lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
12862 int cnt)
12863{
12864 struct lpfc_scsi_buf *psb = NULL;
12865 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
12866 struct sgl_page_pairs *sgl_pg_pairs;
12867 void *viraddr;
12868 LPFC_MBOXQ_t *mbox;
12869 uint32_t reqlen, alloclen, pg_pairs;
12870 uint32_t mbox_tmo;
12871 uint16_t xri_start = 0, scsi_xri_start;
12872 uint16_t rsrc_range;
12873 int rc = 0, avail_cnt;
12874 uint32_t shdr_status, shdr_add_status;
12875 dma_addr_t pdma_phys_bpl1;
12876 union lpfc_sli4_cfg_shdr *shdr;
12877 struct lpfc_rsrc_blks *rsrc_blk;
12878 uint32_t xri_cnt = 0;
12879
12880 /* Calculate the total requested length of the dma memory */
12881 reqlen = cnt * sizeof(struct sgl_page_pairs) +
12882 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12883 if (reqlen > SLI4_PAGE_SIZE) {
12884 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12885 "2932 Block sgl registration required DMA "
 12886 "size (%d) greater than a page\n", reqlen);
12887 return -ENOMEM;
12888 }
12889
12890 /*
 12891 * The use of extents requires the driver to post the sgls in
 12892 * multiple postings, one per contiguous extent of resource ids.
12893 */
12894 psb = list_prepare_entry(psb, sblist, list);
12895 scsi_xri_start = phba->sli4_hba.scsi_xri_start;
12896 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
12897 list) {
12898 rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
12899 if (rsrc_range < scsi_xri_start)
12900 continue;
12901 else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
12902 continue;
12903 else
12904 avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
12905
12906 reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
12907 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
12908 /*
12909 * Allocate DMA memory and set up the non-embedded mailbox
12910 * command. The mbox is used to post an SGL page per loop
12911 * but the DMA memory has a use-once semantic so the mailbox
12912 * is used and freed per loop pass.
12913 */
12914 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12915 if (!mbox) {
12916 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12917 "2933 Failed to allocate mbox cmd "
12918 "memory\n");
12919 return -ENOMEM;
12920 }
12921 alloclen = lpfc_sli4_config(phba, mbox,
12922 LPFC_MBOX_SUBSYSTEM_FCOE,
12923 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
12924 reqlen,
12925 LPFC_SLI4_MBX_NEMBED);
12926 if (alloclen < reqlen) {
12927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12928 "2934 Allocated DMA memory size (%d) "
12929 "is less than the requested DMA memory "
12930 "size (%d)\n", alloclen, reqlen);
12931 lpfc_sli4_mbox_cmd_free(phba, mbox);
12932 return -ENOMEM;
12933 }
12934
12935 /* Get the first SGE entry from the non-embedded DMA memory */
12936 viraddr = mbox->sge_array->addr[0];
12937
12938 /* Set up the SGL pages in the non-embedded DMA pages */
12939 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
12940 sgl_pg_pairs = &sgl->sgl_pg_pairs;
12941
12942 /* pg_pairs tracks posted SGEs per loop iteration. */
12943 pg_pairs = 0;
12944 list_for_each_entry_continue(psb, sblist, list) {
12945 /* Set up the sge entry */
12946 sgl_pg_pairs->sgl_pg0_addr_lo =
12947 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
12948 sgl_pg_pairs->sgl_pg0_addr_hi =
12949 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
12950 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
12951 pdma_phys_bpl1 = psb->dma_phys_bpl +
12952 SGL_PAGE_SIZE;
12953 else
12954 pdma_phys_bpl1 = 0;
12955 sgl_pg_pairs->sgl_pg1_addr_lo =
12956 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
12957 sgl_pg_pairs->sgl_pg1_addr_hi =
12958 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
12959 /* Keep the first xri for this extent. */
12960 if (pg_pairs == 0)
12961 xri_start = psb->cur_iocbq.sli4_xritag;
12962 sgl_pg_pairs++;
12963 pg_pairs++;
12964 xri_cnt++;
12965
12966 /*
12967 * Track two exit conditions - the loop has constructed
12968 * all of the caller's SGE pairs or all available
12969 * resource IDs in this extent are consumed.
12970 */
12971 if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
12972 break;
12973 }
12974 rsrc_blk->rsrc_used += pg_pairs;
12975 bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
12976 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
12977
12978 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12979 "3016 Post SCSI Extent SGL, start %d, cnt %d "
12980 "blk use %d\n",
12981 xri_start, pg_pairs, rsrc_blk->rsrc_used);
12982 /* Perform endian conversion if necessary */
12983 sgl->word0 = cpu_to_le32(sgl->word0);
12984 if (!phba->sli4_hba.intr_enable)
12985 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12986 else {
12987 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12988 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12989 }
12990 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
12991 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12992 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12993 &shdr->response);
12994 if (rc != MBX_TIMEOUT)
12995 lpfc_sli4_mbox_cmd_free(phba, mbox);
12996 if (shdr_status || shdr_add_status || rc) {
12997 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12998 "2935 POST_SGL_BLOCK mailbox command "
12999 "failed status x%x add_status x%x "
13000 "mbx status x%x\n",
13001 shdr_status, shdr_add_status, rc);
13002 return -ENXIO;
13003 }
13004
13005 /* Post only what is requested. */
13006 if (xri_cnt >= cnt)
13007 break;
13008 }
13009 return rc;
13010}
13011
13012/**
11751 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 13013 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
11752 * @phba: pointer to lpfc_hba struct that the frame was received on 13014 * @phba: pointer to lpfc_hba struct that the frame was received on
11753 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13015 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -12137,6 +13399,28 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
12137} 13399}
12138 13400
12139/** 13401/**
13402 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
13403 * @phba: Pointer to HBA context object.
13404 * @xri: xri id in transaction.
13405 *
 13406 * This function validates that the xri maps to the known range of XRIs
 13407 * allocated and used by the driver; returns the logical index or NO_XRI.
13408 **/
13409static uint16_t
13410lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
13411 uint16_t xri)
13412{
13413 int i;
13414
13415 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
13416 if (xri == phba->sli4_hba.xri_ids[i])
13417 return i;
13418 }
13419 return NO_XRI;
13420}
13421
13423/**
12140 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 13424 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
12141 * @phba: Pointer to HBA context object. 13425 * @phba: Pointer to HBA context object.
12142 * @fc_hdr: pointer to a FC frame header. 13426 * @fc_hdr: pointer to a FC frame header.
@@ -12169,9 +13453,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
12169 "SID:x%x\n", oxid, sid); 13453 "SID:x%x\n", oxid, sid);
12170 return; 13454 return;
12171 } 13455 }
 12172 if (rxid >= phba->sli4_hba.max_cfg_param.xri_base 13456 if (lpfc_sli4_xri_inrange(phba, rxid) != NO_XRI)
12173 && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
12174 + phba->sli4_hba.max_cfg_param.xri_base))
12175 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); 13457 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
12176 13458
12177 /* Allocate buffer for rsp iocb */ 13459 /* Allocate buffer for rsp iocb */
@@ -12194,12 +13476,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
12194 icmd->ulpBdeCount = 0; 13476 icmd->ulpBdeCount = 0;
12195 icmd->ulpLe = 1; 13477 icmd->ulpLe = 1;
12196 icmd->ulpClass = CLASS3; 13478 icmd->ulpClass = CLASS3;
12197 icmd->ulpContext = ndlp->nlp_rpi; 13479 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
12198 ctiocb->context1 = ndlp; 13480 ctiocb->context1 = ndlp;
12199 13481
12200 ctiocb->iocb_cmpl = NULL; 13482 ctiocb->iocb_cmpl = NULL;
12201 ctiocb->vport = phba->pport; 13483 ctiocb->vport = phba->pport;
12202 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 13484 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
13485 ctiocb->sli4_lxritag = NO_XRI;
12203 ctiocb->sli4_xritag = NO_XRI; 13486 ctiocb->sli4_xritag = NO_XRI;
12204 13487
12205 /* If the oxid maps to the FCP XRI range or if it is out of range, 13488 /* If the oxid maps to the FCP XRI range or if it is out of range,
@@ -12380,8 +13663,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
12380 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 13663 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
12381 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 13664 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
12382 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 13665 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
12383 first_iocbq->iocb.unsli3.rcvsli3.vpi = 13666 /* iocbq is prepped for internal consumption. Logical vpi. */
12384 vport->vpi + vport->phba->vpi_base; 13667 first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
12385 /* put the first buffer into the first IOCBq */ 13668 /* put the first buffer into the first IOCBq */
12386 first_iocbq->context2 = &seq_dmabuf->dbuf; 13669 first_iocbq->context2 = &seq_dmabuf->dbuf;
12387 first_iocbq->context3 = NULL; 13670 first_iocbq->context3 = NULL;
@@ -12461,7 +13744,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
12461 &phba->sli.ring[LPFC_ELS_RING], 13744 &phba->sli.ring[LPFC_ELS_RING],
12462 iocbq, fc_hdr->fh_r_ctl, 13745 iocbq, fc_hdr->fh_r_ctl,
12463 fc_hdr->fh_type)) 13746 fc_hdr->fh_type))
12464 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13747 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12465 "2540 Ring %d handler: unexpected Rctl " 13748 "2540 Ring %d handler: unexpected Rctl "
12466 "x%x Type x%x received\n", 13749 "x%x Type x%x received\n",
12467 LPFC_ELS_RING, 13750 LPFC_ELS_RING,
@@ -12558,9 +13841,24 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
12558{ 13841{
12559 struct lpfc_rpi_hdr *rpi_page; 13842 struct lpfc_rpi_hdr *rpi_page;
12560 uint32_t rc = 0; 13843 uint32_t rc = 0;
13844 uint16_t lrpi = 0;
13845
13846 /* SLI4 ports that support extents do not require RPI headers. */
13847 if (!phba->sli4_hba.rpi_hdrs_in_use)
13848 goto exit;
13849 if (phba->sli4_hba.extents_in_use)
13850 return -EIO;
12561 13851
12562 /* Post all rpi memory regions to the port. */
12563 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 13852 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
13853 /*
13854 * Assign the rpi headers a physical rpi only if the driver
13855 * has not initialized those resources. A port reset only
13856 * needs the headers posted.
13857 */
13858 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
13859 LPFC_RPI_RSRC_RDY)
13860 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
13861
12564 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 13862 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
12565 if (rc != MBX_SUCCESS) { 13863 if (rc != MBX_SUCCESS) {
12566 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13864 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12571,6 +13869,9 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
12571 } 13869 }
12572 } 13870 }
12573 13871
13872 exit:
13873 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
13874 LPFC_RPI_RSRC_RDY);
12574 return rc; 13875 return rc;
12575} 13876}
12576 13877
@@ -12594,10 +13895,15 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12594 LPFC_MBOXQ_t *mboxq; 13895 LPFC_MBOXQ_t *mboxq;
12595 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 13896 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
12596 uint32_t rc = 0; 13897 uint32_t rc = 0;
12597 uint32_t mbox_tmo;
12598 uint32_t shdr_status, shdr_add_status; 13898 uint32_t shdr_status, shdr_add_status;
12599 union lpfc_sli4_cfg_shdr *shdr; 13899 union lpfc_sli4_cfg_shdr *shdr;
12600 13900
13901 /* SLI4 ports that support extents do not require RPI headers. */
13902 if (!phba->sli4_hba.rpi_hdrs_in_use)
13903 return rc;
13904 if (phba->sli4_hba.extents_in_use)
13905 return -EIO;
13906
12601 /* The port is notified of the header region via a mailbox command. */ 13907 /* The port is notified of the header region via a mailbox command. */
12602 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13908 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12603 if (!mboxq) { 13909 if (!mboxq) {
@@ -12609,16 +13915,19 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12609 13915
12610 /* Post all rpi memory regions to the port. */ 13916 /* Post all rpi memory regions to the port. */
12611 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 13917 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
12612 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
12613 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 13918 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
12614 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 13919 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
12615 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 13920 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
12616 sizeof(struct lpfc_sli4_cfg_mhdr), 13921 sizeof(struct lpfc_sli4_cfg_mhdr),
12617 LPFC_SLI4_MBX_EMBED); 13922 LPFC_SLI4_MBX_EMBED);
12618 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 13923
12619 hdr_tmpl, rpi_page->page_count); 13924
13925 /* Post the physical rpi to the port for this rpi header. */
12620 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 13926 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
12621 rpi_page->start_rpi); 13927 rpi_page->start_rpi);
13928 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
13929 hdr_tmpl, rpi_page->page_count);
13930
12622 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 13931 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
12623 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 13932 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
12624 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 13933 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -12653,22 +13962,21 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
12653int 13962int
12654lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 13963lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12655{ 13964{
12656 int rpi; 13965 unsigned long rpi;
12657 uint16_t max_rpi, rpi_base, rpi_limit; 13966 uint16_t max_rpi, rpi_limit;
12658 uint16_t rpi_remaining; 13967 uint16_t rpi_remaining, lrpi = 0;
12659 struct lpfc_rpi_hdr *rpi_hdr; 13968 struct lpfc_rpi_hdr *rpi_hdr;
12660 13969
12661 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 13970 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
12662 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
12663 rpi_limit = phba->sli4_hba.next_rpi; 13971 rpi_limit = phba->sli4_hba.next_rpi;
12664 13972
12665 /* 13973 /*
12666 * The valid rpi range is not guaranteed to be zero-based. Start 13974 * Fetch the next logical rpi. Because this index is logical,
12667 * the search at the rpi_base as reported by the port. 13975 * the driver starts at 0 each time.
12668 */ 13976 */
12669 spin_lock_irq(&phba->hbalock); 13977 spin_lock_irq(&phba->hbalock);
12670 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base); 13978 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
12671 if (rpi >= rpi_limit || rpi < rpi_base) 13979 if (rpi >= rpi_limit)
12672 rpi = LPFC_RPI_ALLOC_ERROR; 13980 rpi = LPFC_RPI_ALLOC_ERROR;
12673 else { 13981 else {
12674 set_bit(rpi, phba->sli4_hba.rpi_bmask); 13982 set_bit(rpi, phba->sli4_hba.rpi_bmask);
@@ -12678,7 +13986,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12678 13986
12679 /* 13987 /*
12680 * Don't try to allocate more rpi header regions if the device limit 13988 * Don't try to allocate more rpi header regions if the device limit
12681 * on available rpis max has been exhausted. 13989 * has been exhausted.
12682 */ 13990 */
12683 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 13991 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
12684 (phba->sli4_hba.rpi_count >= max_rpi)) { 13992 (phba->sli4_hba.rpi_count >= max_rpi)) {
@@ -12687,13 +13995,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12687 } 13995 }
12688 13996
12689 /* 13997 /*
13998 * RPI header postings are not required for SLI4 ports capable of
13999 * extents.
14000 */
14001 if (!phba->sli4_hba.rpi_hdrs_in_use) {
14002 spin_unlock_irq(&phba->hbalock);
14003 return rpi;
14004 }
14005
14006 /*
12690 * If the driver is running low on rpi resources, allocate another 14007 * If the driver is running low on rpi resources, allocate another
12691 * page now. Note that the next_rpi value is used because 14008 * page now. Note that the next_rpi value is used because
12692 * it represents how many are actually in use whereas max_rpi notes 14009 * it represents how many are actually in use whereas max_rpi notes
12693 * how many are supported max by the device. 14010 * how many are supported max by the device.
12694 */ 14011 */
12695 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base - 14012 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
12696 phba->sli4_hba.rpi_count;
12697 spin_unlock_irq(&phba->hbalock); 14013 spin_unlock_irq(&phba->hbalock);
12698 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 14014 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
12699 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 14015 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
@@ -12702,6 +14018,8 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
12702 "2002 Error Could not grow rpi " 14018 "2002 Error Could not grow rpi "
12703 "count\n"); 14019 "count\n");
12704 } else { 14020 } else {
14021 lrpi = rpi_hdr->start_rpi;
14022 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
12705 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 14023 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
12706 } 14024 }
12707 } 14025 }
@@ -12751,6 +14069,8 @@ void
12751lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 14069lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
12752{ 14070{
12753 kfree(phba->sli4_hba.rpi_bmask); 14071 kfree(phba->sli4_hba.rpi_bmask);
14072 kfree(phba->sli4_hba.rpi_ids);
14073 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
12754} 14074}
12755 14075
12756/** 14076/**
@@ -13490,6 +14810,96 @@ out:
13490} 14810}
13491 14811
13492/** 14812/**
14813 * lpfc_wr_object - write an object to the firmware
14814 * @phba: HBA structure that indicates port to create a queue on.
14815 * @dmabuf_list: list of dmabufs to write to the port.
14816 * @size: the total byte value of the objects to write to the port.
14817 * @offset: the current offset to be used to start the transfer.
14818 *
14819 * This routine will create a wr_object mailbox command to send to the port.
 14820 * The mailbox command will be constructed using the dma buffers described in
 14821 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 14822 * BDEs as the embedded mailbox can support. The @offset variable will be
 14823 * used to indicate the starting offset of the transfer and will also return
 14824 * the offset after the write object mailbox has completed. @size is used to
 14825 * determine the end of the object and whether the eof bit should be set.
 14826 *
 14827 * Return 0 if successful; @offset will contain the new offset to use
 14828 * for the next write.
 14829 * Return a negative value for error cases.
14830 **/
14831int
14832lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
14833 uint32_t size, uint32_t *offset)
14834{
14835 struct lpfc_mbx_wr_object *wr_object;
14836 LPFC_MBOXQ_t *mbox;
14837 int rc = 0, i = 0;
14838 uint32_t shdr_status, shdr_add_status;
14839 uint32_t mbox_tmo;
14840 union lpfc_sli4_cfg_shdr *shdr;
14841 struct lpfc_dmabuf *dmabuf;
14842 uint32_t written = 0;
14843
14844 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14845 if (!mbox)
14846 return -ENOMEM;
14847
14848 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14849 LPFC_MBOX_OPCODE_WRITE_OBJECT,
14850 sizeof(struct lpfc_mbx_wr_object) -
14851 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
14852
14853 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
14854 wr_object->u.request.write_offset = *offset;
14855 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
14856 wr_object->u.request.object_name[0] =
14857 cpu_to_le32(wr_object->u.request.object_name[0]);
14858 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
14859 list_for_each_entry(dmabuf, dmabuf_list, list) {
14860 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
14861 break;
14862 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
14863 wr_object->u.request.bde[i].addrHigh =
14864 putPaddrHigh(dmabuf->phys);
14865 if (written + SLI4_PAGE_SIZE >= size) {
14866 wr_object->u.request.bde[i].tus.f.bdeSize =
14867 (size - written);
14868 written += (size - written);
14869 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
14870 } else {
14871 wr_object->u.request.bde[i].tus.f.bdeSize =
14872 SLI4_PAGE_SIZE;
14873 written += SLI4_PAGE_SIZE;
14874 }
14875 i++;
14876 }
14877 wr_object->u.request.bde_count = i;
14878 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
14879 if (!phba->sli4_hba.intr_enable)
14880 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14881 else {
14882 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
14883 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14884 }
14885 /* The IOCTL status is embedded in the mailbox subheader. */
14886 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
14887 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14888 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14889 if (rc != MBX_TIMEOUT)
14890 mempool_free(mbox, phba->mbox_mem_pool);
14891 if (shdr_status || shdr_add_status || rc) {
14892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14893 "3025 Write Object mailbox failed with "
14894 "status x%x add_status x%x, mbx status x%x\n",
14895 shdr_status, shdr_add_status, rc);
14896 rc = -ENXIO;
14897 } else
14898 *offset += wr_object->u.response.actual_write_length;
14899 return rc;
14900}
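
A hedged caller-side sketch of the contract documented above. It assumes @size means the bytes still outstanding for the current pass (which is what the written + SLI4_PAGE_SIZE >= size eof test implies) and that refilling the dmabuf pages between passes happens elsewhere; the helper name is illustrative only:

	static int example_write_object(struct lpfc_hba *phba,
					struct list_head *dmabuf_list,
					uint32_t image_size)
	{
		uint32_t offset = 0;
		int rc;

		while (offset < image_size) {
			/* ... copy the next chunk of the image into the
			 * pages on dmabuf_list here (elided) ...
			 */
			rc = lpfc_wr_object(phba, dmabuf_list,
					    image_size - offset, &offset);
			if (rc)
				return rc;	/* -ENOMEM or -ENXIO from above */
		}
		return 0;
	}
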
14901
14902/**
13493 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 14903 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
13494 * @vport: pointer to vport data structure. 14904 * @vport: pointer to vport data structure.
13495 * 14905 *
@@ -13644,7 +15054,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
13644 * never happen 15054 * never happen
13645 */ 15055 */
13646 sglq = __lpfc_clear_active_sglq(phba, 15056 sglq = __lpfc_clear_active_sglq(phba,
13647 sglq->sli4_xritag); 15057 sglq->sli4_lxritag);
13648 spin_unlock_irqrestore(&phba->hbalock, iflags); 15058 spin_unlock_irqrestore(&phba->hbalock, iflags);
13649 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15059 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13650 "2823 txq empty and txq_cnt is %d\n ", 15060 "2823 txq empty and txq_cnt is %d\n ",
@@ -13656,6 +15066,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
13656 /* The xri and iocb resources secured, 15066 /* The xri and iocb resources secured,
13657 * attempt to issue request 15067 * attempt to issue request
13658 */ 15068 */
15069 piocbq->sli4_lxritag = sglq->sli4_lxritag;
13659 piocbq->sli4_xritag = sglq->sli4_xritag; 15070 piocbq->sli4_xritag = sglq->sli4_xritag;
13660 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 15071 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
13661 fail_msg = "to convert bpl to sgl"; 15072 fail_msg = "to convert bpl to sgl";
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 453577c21c14..a0075b0af142 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -52,6 +52,7 @@ struct lpfc_iocbq {
52 struct list_head clist; 52 struct list_head clist;
53 struct list_head dlist; 53 struct list_head dlist;
54 uint16_t iotag; /* pre-assigned IO tag */ 54 uint16_t iotag; /* pre-assigned IO tag */
55 uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
55 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 56 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
56 struct lpfc_cq_event cq_event; 57 struct lpfc_cq_event cq_event;
57 58
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1a3cbf88f2ce..4b1703554a26 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -310,7 +310,6 @@ struct lpfc_max_cfg_param {
310 uint16_t vfi_base; 310 uint16_t vfi_base;
311 uint16_t vfi_used; 311 uint16_t vfi_used;
312 uint16_t max_fcfi; 312 uint16_t max_fcfi;
313 uint16_t fcfi_base;
314 uint16_t fcfi_used; 313 uint16_t fcfi_used;
315 uint16_t max_eq; 314 uint16_t max_eq;
316 uint16_t max_rq; 315 uint16_t max_rq;
@@ -365,6 +364,11 @@ struct lpfc_pc_sli4_params {
365 uint8_t rqv; 364 uint8_t rqv;
366}; 365};
367 366
367struct lpfc_iov {
368 uint32_t pf_number;
369 uint32_t vf_number;
370};
371
368/* SLI4 HBA data structure entries */ 372/* SLI4 HBA data structure entries */
369struct lpfc_sli4_hba { 373struct lpfc_sli4_hba {
370 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 374 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -444,10 +448,13 @@ struct lpfc_sli4_hba {
444 uint32_t intr_enable; 448 uint32_t intr_enable;
445 struct lpfc_bmbx bmbx; 449 struct lpfc_bmbx bmbx;
446 struct lpfc_max_cfg_param max_cfg_param; 450 struct lpfc_max_cfg_param max_cfg_param;
451 uint16_t extents_in_use; /* must allocate resource extents. */
452 uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
447 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ 453 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
448 uint16_t next_rpi; 454 uint16_t next_rpi;
449 uint16_t scsi_xri_max; 455 uint16_t scsi_xri_max;
450 uint16_t scsi_xri_cnt; 456 uint16_t scsi_xri_cnt;
457 uint16_t scsi_xri_start;
451 struct list_head lpfc_free_sgl_list; 458 struct list_head lpfc_free_sgl_list;
452 struct list_head lpfc_sgl_list; 459 struct list_head lpfc_sgl_list;
453 struct lpfc_sglq **lpfc_els_sgl_array; 460 struct lpfc_sglq **lpfc_els_sgl_array;
@@ -458,7 +465,17 @@ struct lpfc_sli4_hba {
458 struct lpfc_sglq **lpfc_sglq_active_list; 465 struct lpfc_sglq **lpfc_sglq_active_list;
459 struct list_head lpfc_rpi_hdr_list; 466 struct list_head lpfc_rpi_hdr_list;
460 unsigned long *rpi_bmask; 467 unsigned long *rpi_bmask;
468 uint16_t *rpi_ids;
461 uint16_t rpi_count; 469 uint16_t rpi_count;
470 struct list_head lpfc_rpi_blk_list;
471 unsigned long *xri_bmask;
472 uint16_t *xri_ids;
473 uint16_t xri_count;
474 struct list_head lpfc_xri_blk_list;
475 unsigned long *vfi_bmask;
476 uint16_t *vfi_ids;
477 uint16_t vfi_count;
478 struct list_head lpfc_vfi_blk_list;
462 struct lpfc_sli4_flags sli4_flags; 479 struct lpfc_sli4_flags sli4_flags;
463 struct list_head sp_queue_event; 480 struct list_head sp_queue_event;
464 struct list_head sp_cqe_event_pool; 481 struct list_head sp_cqe_event_pool;
@@ -467,6 +484,7 @@ struct lpfc_sli4_hba {
467 struct list_head sp_els_xri_aborted_work_queue; 484 struct list_head sp_els_xri_aborted_work_queue;
468 struct list_head sp_unsol_work_queue; 485 struct list_head sp_unsol_work_queue;
469 struct lpfc_sli4_link link_state; 486 struct lpfc_sli4_link link_state;
487 struct lpfc_iov iov;
470 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 488 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
471 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ 489 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
472}; 490};
@@ -490,6 +508,7 @@ struct lpfc_sglq {
490 enum lpfc_sgl_state state; 508 enum lpfc_sgl_state state;
491 struct lpfc_nodelist *ndlp; /* ndlp associated with IO */ 509 struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
492 uint16_t iotag; /* pre-assigned IO tag */ 510 uint16_t iotag; /* pre-assigned IO tag */
511 uint16_t sli4_lxritag; /* logical pre-assigned xri. */
493 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 512 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
494 struct sli4_sge *sgl; /* pre-assigned SGL */ 513 struct sli4_sge *sgl; /* pre-assigned SGL */
495 void *virt; /* virtual address. */ 514 void *virt; /* virtual address. */
@@ -504,6 +523,13 @@ struct lpfc_rpi_hdr {
504 uint32_t start_rpi; 523 uint32_t start_rpi;
505}; 524};
506 525
526struct lpfc_rsrc_blks {
527 struct list_head list;
528 uint16_t rsrc_start;
529 uint16_t rsrc_size;
530 uint16_t rsrc_used;
531};
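
To illustrate how this descriptor is consumed, a short sketch that totals the unused ids across an extent block list, mirroring the rsrc_used accounting done by the posting routines in lpfc_sli.c; the helper is hypothetical and assumes the caller serializes against those routines:

	static int example_xri_ids_avail(struct lpfc_hba *phba)
	{
		struct lpfc_rsrc_blks *blk;
		int avail = 0;

		/* Sum the ids not yet consumed in each posted extent. */
		list_for_each_entry(blk, &phba->sli4_hba.lpfc_xri_blk_list, list)
			avail += blk->rsrc_size - blk->rsrc_used;
		return avail;
	}
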
532
507/* 533/*
508 * SLI4 specific function prototypes 534 * SLI4 specific function prototypes
509 */ 535 */
@@ -543,8 +569,11 @@ int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
543int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); 569int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
544uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); 570uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
545int lpfc_sli4_post_async_mbox(struct lpfc_hba *); 571int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
546int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); 572int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
573int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
547int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); 574int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
575int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
576 int);
548struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 577struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
549struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); 578struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
550void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); 579void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 30ba5440c67a..1feb551a57bc 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -83,7 +83,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
83static int 83static int
84lpfc_alloc_vpi(struct lpfc_hba *phba) 84lpfc_alloc_vpi(struct lpfc_hba *phba)
85{ 85{
86 int vpi; 86 unsigned long vpi;
87 87
88 spin_lock_irq(&phba->hbalock); 88 spin_lock_irq(&phba->hbalock);
89 /* Start at bit 1 because vpi zero is reserved for the physical port */ 89 /* Start at bit 1 because vpi zero is reserved for the physical port */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 046dcc672ec1..7370c084b178 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.05.34-rc1" 36#define MEGASAS_VERSION "00.00.05.38-rc1"
37#define MEGASAS_RELDATE "Feb. 24, 2011" 37#define MEGASAS_RELDATE "May. 11, 2011"
38#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011" 38#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -76,8 +76,8 @@
76#define MFI_STATE_READY 0xB0000000 76#define MFI_STATE_READY 0xB0000000
77#define MFI_STATE_OPERATIONAL 0xC0000000 77#define MFI_STATE_OPERATIONAL 0xC0000000
78#define MFI_STATE_FAULT 0xF0000000 78#define MFI_STATE_FAULT 0xF0000000
79#define MFI_RESET_REQUIRED 0x00000001 79#define MFI_RESET_REQUIRED 0x00000001
80 80#define MFI_RESET_ADAPTER 0x00000002
81#define MEGAMFI_FRAME_SIZE 64 81#define MEGAMFI_FRAME_SIZE 64
82 82
83/* 83/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 89c623ebadbc..2d8cdce7b2f5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.05.34-rc1 21 * Version : v00.00.05.38-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -437,15 +437,18 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
 static int
 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
 {
-    u32 status;
+    u32 status, mfiStatus = 0;
+
     /*
      * Check if it is our interrupt
      */
     status = readl(&regs->outbound_intr_status);
 
-    if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
-        return 0;
-    }
+    if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
+        mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+    if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+        mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
 
     /*
      * Clear the interrupt by writing back the same value
@@ -455,8 +458,9 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
     /* Dummy readl to force pci flush */
     readl(&regs->outbound_doorbell_clear);
 
-    return 1;
+    return mfiStatus;
 }
+
 /**
  * megasas_fire_cmd_ppc - Sends command to the FW
  * @frame_phys_addr : Physical address of cmd
@@ -477,17 +481,6 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
 }
 
 /**
- * megasas_adp_reset_ppc - For controller reset
- * @regs: MFI register set
- */
-static int
-megasas_adp_reset_ppc(struct megasas_instance *instance,
-            struct megasas_register_set __iomem *regs)
-{
-    return 0;
-}
-
-/**
  * megasas_check_reset_ppc - For controller reset check
  * @regs: MFI register set
  */
@@ -495,8 +488,12 @@ static int
 megasas_check_reset_ppc(struct megasas_instance *instance,
             struct megasas_register_set __iomem *regs)
 {
+    if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+        return 1;
+
     return 0;
 }
+
 static struct megasas_instance_template megasas_instance_template_ppc = {
 
     .fire_cmd = megasas_fire_cmd_ppc,
@@ -504,7 +501,7 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
     .disable_intr = megasas_disable_intr_ppc,
     .clear_intr = megasas_clear_intr_ppc,
     .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
-    .adp_reset = megasas_adp_reset_ppc,
+    .adp_reset = megasas_adp_reset_xscale,
     .check_reset = megasas_check_reset_ppc,
     .service_isr = megasas_isr,
     .tasklet = megasas_complete_cmd_dpc,
@@ -620,6 +617,9 @@ static int
 megasas_check_reset_skinny(struct megasas_instance *instance,
                 struct megasas_register_set __iomem *regs)
 {
+    if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+        return 1;
+
     return 0;
 }
 
@@ -3454,7 +3454,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 {
     u32 max_sectors_1;
     u32 max_sectors_2;
-    u32 tmp_sectors;
+    u32 tmp_sectors, msix_enable;
     struct megasas_register_set __iomem *reg_set;
     struct megasas_ctrl_info *ctrl_info;
     unsigned long bar_list;
@@ -3507,6 +3507,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
     if (megasas_transition_to_ready(instance))
         goto fail_ready_state;
 
+    /* Check if MSI-X is supported while in ready state */
+    msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+               0x4000000) >> 0x1a;
+    if (msix_enable && !msix_disable &&
+        !pci_enable_msix(instance->pdev, &instance->msixentry, 1))
+        instance->msi_flag = 1;
+
     /* Get operational params, sge flags, send init cmd to controller */
     if (instance->instancet->init_adapter(instance))
         goto fail_init_adapter;
@@ -4076,14 +4083,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
     else
         INIT_WORK(&instance->work_init, process_fw_state_change_wq);
 
-    /* Try to enable MSI-X */
-    if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
-        (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
-        (instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) &&
-        !msix_disable && !pci_enable_msix(instance->pdev,
-                          &instance->msixentry, 1))
-        instance->msi_flag = 1;
-
     /*
      * Initialize MFI Firmware
      */
@@ -4116,6 +4115,14 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
     megasas_mgmt_info.max_index++;
 
     /*
+     * Register with SCSI mid-layer
+     */
+    if (megasas_io_attach(instance))
+        goto fail_io_attach;
+
+    instance->unload = 0;
+
+    /*
      * Initiate AEN (Asynchronous Event Notification)
      */
     if (megasas_start_aen(instance)) {
@@ -4123,13 +4130,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         goto fail_start_aen;
     }
 
-    /*
-     * Register with SCSI mid-layer
-     */
-    if (megasas_io_attach(instance))
-        goto fail_io_attach;
-
-    instance->unload = 0;
     return 0;
 
       fail_start_aen:
@@ -4332,10 +4332,6 @@ megasas_resume(struct pci_dev *pdev)
     if (megasas_set_dma_mask(pdev))
         goto fail_set_dma_mask;
 
-    /* Now re-enable MSI-X */
-    if (instance->msi_flag)
-        pci_enable_msix(instance->pdev, &instance->msixentry, 1);
-
     /*
      * Initialize MFI Firmware
      */
@@ -4348,6 +4344,10 @@ megasas_resume(struct pci_dev *pdev)
     if (megasas_transition_to_ready(instance))
         goto fail_ready_state;
 
+    /* Now re-enable MSI-X */
+    if (instance->msi_flag)
+        pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+
     switch (instance->pdev->device) {
     case PCI_DEVICE_ID_LSI_FUSION:
     {
@@ -4384,12 +4384,6 @@ megasas_resume(struct pci_dev *pdev)
 
     instance->instancet->enable_intr(instance->reg_set);
 
-    /*
-     * Initiate AEN (Asynchronous Event Notification)
-     */
-    if (megasas_start_aen(instance))
-        printk(KERN_ERR "megasas: Start AEN failed\n");
-
     /* Initialize the cmd completion timer */
     if (poll_mode_io)
         megasas_start_timer(instance, &instance->io_completion_timer,
@@ -4397,6 +4391,12 @@ megasas_resume(struct pci_dev *pdev)
                 MEGASAS_COMPLETION_TIMER_INTERVAL);
     instance->unload = 0;
 
+    /*
+     * Initiate AEN (Asynchronous Event Notification)
+     */
+    if (megasas_start_aen(instance))
+        printk(KERN_ERR "megasas: Start AEN failed\n");
+
     return 0;
 
 fail_irq:
@@ -4527,6 +4527,11 @@ static void megasas_shutdown(struct pci_dev *pdev)
     instance->unload = 1;
     megasas_flush_cache(instance);
     megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+    instance->instancet->disable_intr(instance->reg_set);
+    free_irq(instance->msi_flag ? instance->msixentry.vector :
+         instance->pdev->irq, instance);
+    if (instance->msi_flag)
+        pci_disable_msix(instance->pdev);
 }
 
 /**
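Note: the clear_intr change above converts the handler's contract from a 0/1 "ours or not" to a bitmask, so the shared ISR can distinguish a reply interrupt from a firmware-state-change doorbell. A standalone sketch of that contract; the MFI_* constants mirror the driver's names but treat the exact values as assumptions, and the caller side is inferred from the contract rather than quoted:

#include <stdio.h>
#include <stdint.h>

#define MFI_REPLY_1078_MESSAGE_INTERRUPT            0x80000000
#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT   0x00000004
#define MFI_INTR_FLAG_REPLY_MESSAGE                 0x00000001
#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE         0x00000002

/* analog of megasas_clear_intr_ppc(): decode, don't just acknowledge */
static uint32_t clear_intr(uint32_t status)
{
    uint32_t mfiStatus = 0;

    if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
        mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
    if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
        mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
    return mfiStatus;    /* 0 still means "not our interrupt" */
}

int main(void)
{
    uint32_t mfiStatus = clear_intr(0x80000004);

    if (mfiStatus & MFI_INTR_FLAG_REPLY_MESSAGE)
        printf("complete I/O replies\n");
    if (mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)
        printf("schedule firmware state-change work\n");
    return 0;
}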
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 145a8cffb1fa..f13e7abd345a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -696,22 +696,6 @@ fail_get_cmd:
 }
 
 /*
- * megasas_return_cmd_for_smid - Returns a cmd_fusion for a SMID
- * @instance: Adapter soft state
- *
- */
-void
-megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid)
-{
-    struct fusion_context *fusion;
-    struct megasas_cmd_fusion *cmd;
-
-    fusion = instance->ctrl_context;
-    cmd = fusion->cmd_list[smid - 1];
-    megasas_return_cmd_fusion(instance, cmd);
-}
-
-/*
  * megasas_get_ld_map_info - Returns FW's ld_map structure
  * @instance: Adapter soft state
  * @pend: Pend the command or not
@@ -1153,7 +1137,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
     u64 start_blk = io_info->pdBlock;
     u8 *cdb = io_request->CDB.CDB32;
     u32 num_blocks = io_info->numBlocks;
-    u8 opcode, flagvals, groupnum, control;
+    u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
 
     /* Check if T10 PI (DIF) is enabled for this LD */
     ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
@@ -1235,7 +1219,46 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
         cdb[8] = (u8)(num_blocks & 0xff);
         cdb[7] = (u8)((num_blocks >> 8) & 0xff);
 
+        io_request->IoFlags = 10; /* Specify 10-byte cdb */
         cdb_len = 10;
+    } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+        /* Convert to 16 byte CDB for large LBA's */
+        switch (cdb_len) {
+        case 6:
+            opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+            control = cdb[5];
+            break;
+        case 10:
+            opcode =
+                cdb[0] == READ_10 ? READ_16 : WRITE_16;
+            flagvals = cdb[1];
+            groupnum = cdb[6];
+            control = cdb[9];
+            break;
+        case 12:
+            opcode =
+                cdb[0] == READ_12 ? READ_16 : WRITE_16;
+            flagvals = cdb[1];
+            groupnum = cdb[10];
+            control = cdb[11];
+            break;
+        }
+
+        memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+        cdb[0] = opcode;
+        cdb[1] = flagvals;
+        cdb[14] = groupnum;
+        cdb[15] = control;
+
+        /* Transfer length */
+        cdb[13] = (u8)(num_blocks & 0xff);
+        cdb[12] = (u8)((num_blocks >> 8) & 0xff);
+        cdb[11] = (u8)((num_blocks >> 16) & 0xff);
+        cdb[10] = (u8)((num_blocks >> 24) & 0xff);
+
+        io_request->IoFlags = 16; /* Specify 16-byte cdb */
+        cdb_len = 16;
     }
 
     /* Normal case, just load LBA here */
@@ -2026,17 +2049,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
     struct fusion_context *fusion;
     struct megasas_cmd *cmd_mfi;
     union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
-    u32 host_diag, abs_state;
+    u32 host_diag, abs_state, status_reg, reset_adapter;
 
     instance = (struct megasas_instance *)shost->hostdata;
     fusion = instance->ctrl_context;
 
-    mutex_lock(&instance->reset_mutex);
-    set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
-    instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
-    instance->instancet->disable_intr(instance->reg_set);
-    msleep(1000);
-
     if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
         printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
                "returning FAILED.\n");
@@ -2044,6 +2061,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
         goto out;
     }
 
+    mutex_lock(&instance->reset_mutex);
+    set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+    instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+    instance->instancet->disable_intr(instance->reg_set);
+    msleep(1000);
+
     /* First try waiting for commands to complete */
     if (megasas_wait_for_outstanding_fusion(instance)) {
         printk(KERN_WARNING "megaraid_sas: resetting fusion "
@@ -2060,7 +2083,12 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
         }
     }
 
-    if (instance->disableOnlineCtrlReset == 1) {
+    status_reg = instance->instancet->read_fw_status_reg(
+        instance->reg_set);
+    abs_state = status_reg & MFI_STATE_MASK;
+    reset_adapter = status_reg & MFI_RESET_ADAPTER;
+    if (instance->disableOnlineCtrlReset ||
+        (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
         /* Reset not supported, kill adapter */
         printk(KERN_WARNING "megaraid_sas: Reset not supported"
                ", killing adapter.\n");
@@ -2089,6 +2117,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
 
     /* Check that the diag write enable (DRWE) bit is on */
     host_diag = readl(&instance->reg_set->fusion_host_diag);
+    retry = 0;
     while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
         msleep(100);
         host_diag =
@@ -2126,7 +2155,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
 
     abs_state =
         instance->instancet->read_fw_status_reg(
-            instance->reg_set);
+            instance->reg_set) & MFI_STATE_MASK;
     retry = 0;
 
     while ((abs_state <= MFI_STATE_FW_INIT) &&
@@ -2134,7 +2163,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
         msleep(100);
         abs_state =
             instance->instancet->read_fw_status_reg(
-                instance->reg_set);
+                instance->reg_set) & MFI_STATE_MASK;
     }
     if (abs_state <= MFI_STATE_FW_INIT) {
         printk(KERN_WARNING "megaraid_sas: firmware "
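Note: the large hunk above rewrites 6/10/12-byte CDBs to READ_16/WRITE_16 when the start LBA exceeds 32 bits, packing LBA and transfer length big-endian into the 16-byte CDB. A compilable sketch of the byte layout; READ_16 (0x88) is the standard SCSI opcode, and the variable names merely echo the driver's:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define READ_16 0x88

int main(void)
{
    uint8_t cdb[16];
    uint64_t start_blk = 0x123456789ULL;    /* does not fit in 32 bits */
    uint32_t num_blocks = 8;
    int i;

    memset(cdb, 0, sizeof(cdb));
    cdb[0] = READ_16;

    /* LBA, big-endian in bytes 2..9 */
    for (i = 0; i < 8; i++)
        cdb[2 + i] = (uint8_t)(start_blk >> (8 * (7 - i)));

    /* transfer length, big-endian in bytes 10..13 */
    cdb[10] = (uint8_t)((num_blocks >> 24) & 0xff);
    cdb[11] = (uint8_t)((num_blocks >> 16) & 0xff);
    cdb[12] = (uint8_t)((num_blocks >> 8) & 0xff);
    cdb[13] = (uint8_t)(num_blocks & 0xff);

    for (i = 0; i < 16; i++)
        printf("%02x ", cdb[i]);
    printf("\n");
    return 0;
}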
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 2a3c05f6db8b..dcc289c25459 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
 #define MPT2SAS_DRIVER_NAME        "mpt2sas"
 #define MPT2SAS_AUTHOR    "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION    "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION        "08.100.00.01"
+#define MPT2SAS_DRIVER_VERSION        "08.100.00.02"
 #define MPT2SAS_MAJOR_VERSION        08
 #define MPT2SAS_MINOR_VERSION        100
 #define MPT2SAS_BUILD_VERSION        00
-#define MPT2SAS_RELEASE_VERSION        01
+#define MPT2SAS_RELEASE_VERSION        02
 
 /*
  * Set MPT2SAS_SG_DEPTH value based on user input.
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index f12e02358d6d..a7dbc6825f5f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -113,6 +113,7 @@ struct sense_info {
 };
 
 
+#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
 #define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
 
 /**
@@ -121,6 +122,7 @@ struct sense_info {
  * @work: work object (ioc->fault_reset_work_q)
  * @cancel_pending_work: flag set during reset handling
  * @ioc: per adapter object
+ * @device_handle: device handle
  * @VF_ID: virtual function id
  * @VP_ID: virtual port id
  * @ignore: flag meaning this event has been marked to ignore
@@ -134,6 +136,7 @@ struct fw_event_work {
     u8 cancel_pending_work;
     struct delayed_work delayed_work;
     struct MPT2SAS_ADAPTER *ioc;
+    u16 device_handle;
     u8 VF_ID;
     u8 VP_ID;
     u8 ignore;
@@ -3499,6 +3502,7 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
 
     switch (prot_type) {
     case SCSI_PROT_DIF_TYPE1:
+    case SCSI_PROT_DIF_TYPE2:
 
         /*
          * enable ref/guard checking
@@ -3511,13 +3515,6 @@ _scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
             cpu_to_be32(scsi_get_lba(scmd));
         break;
 
-    case SCSI_PROT_DIF_TYPE2:
-
-        eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
-            MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
-            MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
-        break;
-
     case SCSI_PROT_DIF_TYPE3:
 
         /*
@@ -4047,17 +4044,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 #endif
 
 /**
- * _scsih_smart_predicted_fault - illuminate Fault LED
+ * _scsih_turn_on_fault_led - illuminate Fault LED
  * @ioc: per adapter object
  * @handle: device handle
+ * Context: process
  *
  * Return nothing.
  */
 static void
-_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 {
     Mpi2SepReply_t mpi_reply;
     Mpi2SepRequest_t mpi_request;
+
+    memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+    mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+    mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+    mpi_request.SlotStatus =
+        cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+    mpi_request.DevHandle = cpu_to_le16(handle);
+    mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+    if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+        &mpi_request)) != 0) {
+        printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+            __FILE__, __LINE__, __func__);
+        return;
+    }
+
+    if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
+            "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
+            le16_to_cpu(mpi_reply.IOCStatus),
+            le32_to_cpu(mpi_reply.IOCLogInfo)));
+        return;
+    }
+}
+
+/**
+ * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+    struct fw_event_work *fw_event;
+
+    fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+    if (!fw_event)
+        return;
+    fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
+    fw_event->device_handle = handle;
+    fw_event->ioc = ioc;
+    _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_smart_predicted_fault - process smart errors
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
     struct scsi_target *starget;
     struct MPT2SAS_TARGET *sas_target_priv_data;
     Mpi2EventNotificationReply_t *event_reply;
@@ -4084,30 +4139,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
     starget_printk(KERN_WARNING, starget, "predicted fault\n");
     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-    if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) {
-        memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
-        mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
-        mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
-        mpi_request.SlotStatus =
-            cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
-        mpi_request.DevHandle = cpu_to_le16(handle);
-        mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
-        if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
-            &mpi_request)) != 0) {
-            printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
-                ioc->name, __FILE__, __LINE__, __func__);
-            return;
-        }
-
-        if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
-            dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
-                "enclosure_processor: ioc_status (0x%04x), "
-                "loginfo(0x%08x)\n", ioc->name,
-                le16_to_cpu(mpi_reply.IOCStatus),
-                le32_to_cpu(mpi_reply.IOCLogInfo)));
-            return;
-        }
-    }
+    if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
+        _scsih_send_event_to_turn_on_fault_led(ioc, handle);
 
     /* insert into event log */
     sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -6753,6 +6786,9 @@ _firmware_event_work(struct work_struct *work)
     }
 
     switch (fw_event->event) {
+    case MPT2SAS_TURN_ON_FAULT_LED:
+        _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+        break;
     case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
         _scsih_sas_topology_change_event(ioc, fw_event);
         break;
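Note: the mpt2sas change is a classic context split. The SMART-fault path runs in interrupt context, where the blocking SEP (enclosure processor) request that lights the fault LED is illegal, so the interrupt side now only queues a small work item carrying the device handle and the firmware-event worker issues the request later. A userspace analogy of the handoff, with a thread standing in for the event worker; this is an illustration of the pattern, not kernel code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned short pending_handle;
static int have_work;

/* analog of _scsih_send_event_to_turn_on_fault_led(): may not sleep */
static void send_event_to_turn_on_fault_led(unsigned short handle)
{
    pthread_mutex_lock(&lock);
    pending_handle = handle;
    have_work = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
}

/* analog of _scsih_turn_on_fault_led(): process context, may sleep */
static void *fw_event_worker(void *arg)
{
    pthread_mutex_lock(&lock);
    while (!have_work)
        pthread_cond_wait(&cond, &lock);
    printf("issuing SEP request for handle 0x%04x\n", pending_handle);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t worker;

    pthread_create(&worker, NULL, fw_event_worker, NULL);
    send_event_to_turn_on_fault_led(0x0009);    /* "interrupt" fires */
    pthread_join(worker, NULL);
    return 0;
}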
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 58f5be4740e9..de0b1a704fb5 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -4698,12 +4698,14 @@ static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
             break;
 
         if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
+            int j;
+
             STp->pos_unknown = 0;
             STp->partition = STp->new_partition = 0;
             if (STp->can_partitions)
                 STp->nbr_partitions = 1;  /* This guess will be updated later if necessary */
-            for (i=0; i < ST_NBR_PARTITIONS; i++) {
-                STps = &(STp->ps[i]);
+            for (j = 0; j < ST_NBR_PARTITIONS; j++) {
+                STps = &(STp->ps[j]);
                 STps->rw = ST_IDLE;
                 STps->eof = ST_NOEOF;
                 STps->at_sm = 0;
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 0339ff03a535..252523d7847e 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
 qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
-        ql4_nx.o ql4_nvram.o ql4_dbg.o
+        ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o
 
 obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
 
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
new file mode 100644
index 000000000000..864d018631c0
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -0,0 +1,69 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+
+/* Scsi_Host attributes. */
+static ssize_t
+qla4xxx_fw_version_show(struct device *dev,
+            struct device_attribute *attr, char *buf)
+{
+    struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+    if (is_qla8022(ha))
+        return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+                ha->firmware_version[0],
+                ha->firmware_version[1],
+                ha->patch_number, ha->build_number);
+    else
+        return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+                ha->firmware_version[0],
+                ha->firmware_version[1],
+                ha->patch_number, ha->build_number);
+}
+
+static ssize_t
+qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
+            char *buf)
+{
+    struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+    return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
+}
+
+static ssize_t
+qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+    struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+    return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
+            ha->iscsi_minor);
+}
+
+static ssize_t
+qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
+                char *buf)
+{
+    struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+    return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+            ha->bootload_major, ha->bootload_minor,
+            ha->bootload_patch, ha->bootload_build);
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
+static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
+static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
+static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
+
+struct device_attribute *qla4xxx_host_attrs[] = {
+    &dev_attr_fw_version,
+    &dev_attr_serial_num,
+    &dev_attr_iscsi_version,
+    &dev_attr_optrom_version,
+    NULL,
+};
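Note: once qla4xxx_host_attrs is wired into the host template's .shost_attrs hook (see the ql4_os.c hunk below), these four attributes surface as read-only sysfs files under the host's class device, e.g. /sys/class/scsi_host/host<N>/fw_version, serial_num, iscsi_version and optrom_version; S_IRUGO makes them world-readable with no store method.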
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 4757878d59dd..473c5c872b39 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -115,7 +115,7 @@
 #define INVALID_ENTRY        0xFFFF
 #define MAX_CMDS_TO_RISC     1024
 #define MAX_SRBS             MAX_CMDS_TO_RISC
-#define MBOX_AEN_REG_COUNT   5
+#define MBOX_AEN_REG_COUNT   8
 #define MAX_INIT_RETRIES     5
 
 /*
@@ -368,7 +368,6 @@ struct scsi_qla_host {
 #define AF_INIT_DONE            1 /* 0x00000002 */
 #define AF_MBOX_COMMAND         2 /* 0x00000004 */
 #define AF_MBOX_COMMAND_DONE    3 /* 0x00000008 */
-#define AF_DPC_SCHEDULED        5 /* 0x00000020 */
 #define AF_INTERRUPTS_ON        6 /* 0x00000040 */
 #define AF_GET_CRASH_RECORD     7 /* 0x00000080 */
 #define AF_LINK_UP              8 /* 0x00000100 */
@@ -584,6 +583,14 @@ struct scsi_qla_host {
     uint32_t nx_reset_timeout;
 
     struct completion mbx_intr_comp;
+
+    /* --- From About Firmware --- */
+    uint16_t iscsi_major;
+    uint16_t iscsi_minor;
+    uint16_t bootload_major;
+    uint16_t bootload_minor;
+    uint16_t bootload_patch;
+    uint16_t bootload_build;
 };
 
 static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 31e2bf97198c..01082aa77098 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -690,6 +690,29 @@ struct mbx_sys_info {
     uint8_t reserved[12];        /* 34-3f */
 };
 
+struct about_fw_info {
+    uint16_t fw_major;           /* 00 - 01 */
+    uint16_t fw_minor;           /* 02 - 03 */
+    uint16_t fw_patch;           /* 04 - 05 */
+    uint16_t fw_build;           /* 06 - 07 */
+    uint8_t fw_build_date[16];   /* 08 - 17 ASCII String */
+    uint8_t fw_build_time[16];   /* 18 - 27 ASCII String */
+    uint8_t fw_build_user[16];   /* 28 - 37 ASCII String */
+    uint16_t fw_load_source;     /* 38 - 39 */
+                                 /* 1 = Flash Primary,
+                                    2 = Flash Secondary,
+                                    3 = Host Download
+                                 */
+    uint8_t reserved1[6];        /* 3A - 3F */
+    uint16_t iscsi_major;        /* 40 - 41 */
+    uint16_t iscsi_minor;        /* 42 - 43 */
+    uint16_t bootload_major;     /* 44 - 45 */
+    uint16_t bootload_minor;     /* 46 - 47 */
+    uint16_t bootload_patch;     /* 48 - 49 */
+    uint16_t bootload_build;     /* 4A - 4B */
+    uint8_t reserved2[180];      /* 4C - FF */
+};
+
 struct crash_record {
     uint16_t fw_major_version;   /* 00 - 01 */
     uint16_t fw_minor_version;   /* 02 - 03 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index cc53e3fbd78c..a53a256c1f8d 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -61,7 +61,7 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
 int qla4xxx_add_sess(struct ddb_entry *);
 void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
 int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
-int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
+int qla4xxx_about_firmware(struct scsi_qla_host *ha);
 void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
                 uint32_t intr_status);
 int qla4xxx_init_rings(struct scsi_qla_host *ha);
@@ -139,4 +139,5 @@ extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
 extern int ql4xenablemsix;
 
+extern struct device_attribute *qla4xxx_host_attrs[];
 #endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 48e2241ddaf4..42ed5db2d530 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1275,7 +1275,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
     if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
         goto exit_init_hba;
 
-    if (qla4xxx_get_fw_version(ha) == QLA_ERROR)
+    if (qla4xxx_about_firmware(ha) == QLA_ERROR)
         goto exit_init_hba;
 
     if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 2f40ac761cd4..0e72921c752d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -25,9 +25,14 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
 
     memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
     sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
-    if (sense_len == 0)
+    if (sense_len == 0) {
+        DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
+                  " sense len 0\n", ha->host_no,
+                  cmd->device->channel, cmd->device->id,
+                  cmd->device->lun, __func__));
+        ha->status_srb = NULL;
         return;
-
+    }
     /* Save total available sense length,
      * not to exceed cmd's sense buffer size */
     sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
@@ -541,6 +546,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
     case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
     case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
     case MBOX_ASTS_SUBNET_STATE_CHANGE:
+    case MBOX_ASTS_DUPLICATE_IP:
         /* No action */
         DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
                   mbox_status));
@@ -593,11 +599,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                 mbox_sts[i];
 
             /* print debug message */
-            DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
-                      " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
-                      ha->host_no, ha->aen_in, mbox_sts[0],
-                      mbox_sts[1], mbox_sts[2], mbox_sts[3],
-                      mbox_sts[4]));
+            DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
+                      "mb1:0x%x mb2:0x%x mb3:0x%x "
+                      "mb4:0x%x mb5:0x%x\n",
+                      ha->host_no, ha->aen_in,
+                      mbox_sts[0], mbox_sts[1],
+                      mbox_sts[2], mbox_sts[3],
+                      mbox_sts[4], mbox_sts[5]));
 
             /* advance pointer */
             ha->aen_in++;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index d78b58dc5011..fce8289e9752 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -86,22 +86,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
         msleep(10);
     }
 
-    /* To prevent overwriting mailbox registers for a command that has
-     * not yet been serviced, check to see if an active command
-     * (AEN, IOCB, etc.) is interrupting, then service it.
-     * -----------------------------------------------------------------
-     */
     spin_lock_irqsave(&ha->hardware_lock, flags);
 
-    if (!is_qla8022(ha)) {
-        intr_status = readl(&ha->reg->ctrl_status);
-        if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
-            /* Service existing interrupt */
-            ha->isp_ops->interrupt_service_routine(ha, intr_status);
-            clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
-        }
-    }
-
     ha->mbox_status_count = outCount;
     for (i = 0; i < outCount; i++)
         ha->mbox_status[i] = 0;
@@ -1057,38 +1043,65 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
 }
 
 /**
- * qla4xxx_get_fw_version - gets firmware version
+ * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
  * @ha: Pointer to host adapter structure.
  *
- * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may
- * hold an address for data. Make sure that we write 0 to those mailboxes,
- * if unused.
+ * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
+ * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
+ * those mailboxes, if unused.
  **/
-int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
+int qla4xxx_about_firmware(struct scsi_qla_host *ha)
 {
+    struct about_fw_info *about_fw = NULL;
+    dma_addr_t about_fw_dma;
     uint32_t mbox_cmd[MBOX_REG_COUNT];
     uint32_t mbox_sts[MBOX_REG_COUNT];
+    int status = QLA_ERROR;
+
+    about_fw = dma_alloc_coherent(&ha->pdev->dev,
+                      sizeof(struct about_fw_info),
+                      &about_fw_dma, GFP_KERNEL);
+    if (!about_fw) {
+        DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
+                  "for about_fw\n", __func__));
+        return status;
+    }
 
-    /* Get firmware version. */
+    memset(about_fw, 0, sizeof(struct about_fw_info));
     memset(&mbox_cmd, 0, sizeof(mbox_cmd));
     memset(&mbox_sts, 0, sizeof(mbox_sts));
 
     mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
-
-    if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
-        QLA_SUCCESS) {
-        DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
-            "status %04X\n", ha->host_no, __func__, mbox_sts[0]));
-        return QLA_ERROR;
+    mbox_cmd[2] = LSDW(about_fw_dma);
+    mbox_cmd[3] = MSDW(about_fw_dma);
+    mbox_cmd[4] = sizeof(struct about_fw_info);
+
+    status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+                     &mbox_cmd[0], &mbox_sts[0]);
+    if (status != QLA_SUCCESS) {
+        DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
+                  "failed w/ status %04X\n", __func__,
+                  mbox_sts[0]));
+        goto exit_about_fw;
     }
 
-    /* Save firmware version information. */
-    ha->firmware_version[0] = mbox_sts[1];
-    ha->firmware_version[1] = mbox_sts[2];
-    ha->patch_number = mbox_sts[3];
-    ha->build_number = mbox_sts[4];
+    /* Save version information. */
+    ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
+    ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
+    ha->patch_number = le16_to_cpu(about_fw->fw_patch);
+    ha->build_number = le16_to_cpu(about_fw->fw_build);
+    ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
+    ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
+    ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
+    ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
+    ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
+    ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
+    status = QLA_SUCCESS;
 
-    return QLA_SUCCESS;
+exit_about_fw:
+    dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
+              about_fw, about_fw_dma);
+    return status;
 }
 
 static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
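Note: the rewrite above switches MBOX_CMD_ABOUT_FW from returning a handful of values in mailbox registers to filling a 256-byte DMA-coherent buffer, with the buffer's bus address split across two 32-bit mailboxes. A standalone sketch of that LSDW/MSDW split; the macro bodies mirror the qla driver convention but should be treated as assumptions here:

#include <stdio.h>
#include <stdint.h>

#define LSDW(x) ((uint32_t)((uint64_t)(x)))
#define MSDW(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))

int main(void)
{
    uint64_t about_fw_dma = 0x0000000123400080ULL;  /* bus address */
    uint32_t mbox_cmd[8] = { 0 };

    mbox_cmd[2] = LSDW(about_fw_dma);   /* low 32 bits */
    mbox_cmd[3] = MSDW(about_fw_dma);   /* high 32 bits */
    printf("mbox2=0x%08x mbox3=0x%08x\n", mbox_cmd[2], mbox_cmd[3]);
    return 0;
}

The double >> 16 shift is the usual trick to stay warning-free when the same macro is compiled with 32-bit dma_addr_t.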
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 03e522b2fe0b..fdfe27b38698 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -964,12 +964,26 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
     /* Halt all the indiviual PEGs and other blocks of the ISP */
     qla4_8xxx_rom_lock(ha);
 
-    /* mask all niu interrupts */
+    /* disable all I2Q */
+    qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+    qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+    qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+    qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+    qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+    qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+
+    /* disable all niu interrupts */
     qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
     /* disable xge rx/tx */
     qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
     /* disable xg1 rx/tx */
     qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+    /* disable sideband mac */
+    qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+    /* disable ap0 mac */
+    qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+    /* disable ap1 mac */
+    qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
 
     /* halt sre */
     val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
@@ -984,6 +998,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
     qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
     qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
     qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+    qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
 
     /* halt pegs */
     qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
@@ -991,9 +1006,9 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
     qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
     qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
     qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+    msleep(5);
 
     /* big hammer */
-    msleep(1000);
     if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
         /* don't reset CAM block on reset */
         qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c22f2a764d9d..f2364ec59f03 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -124,6 +124,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
     .sg_tablesize        = SG_ALL,
 
     .max_sectors         = 0xFFFF,
+    .shost_attrs         = qla4xxx_host_attrs,
 };
 
 static struct iscsi_transport qla4xxx_iscsi_transport = {
@@ -412,8 +413,7 @@ void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
 
 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
                        struct ddb_entry *ddb_entry,
-                       struct scsi_cmnd *cmd,
-                       void (*done)(struct scsi_cmnd *))
+                       struct scsi_cmnd *cmd)
 {
     struct srb *srb;
 
@@ -427,7 +427,6 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
     srb->cmd = cmd;
     srb->flags = 0;
     CMD_SP(cmd) = (void *)srb;
-    cmd->scsi_done = done;
 
     return srb;
 }
@@ -458,9 +457,8 @@ void qla4xxx_srb_compl(struct kref *ref)
 
 /**
  * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
+ * @host: scsi host
  * @cmd: Pointer to Linux's SCSI command structure
- * @done_fn: Function that the driver calls to notify the SCSI mid-layer
- *    that the command has been processed.
  *
  * Remarks:
  * This routine is invoked by Linux to send a SCSI command to the driver.
@@ -470,10 +468,9 @@ void qla4xxx_srb_compl(struct kref *ref)
  * completion handling).   Unfortunely, it sometimes calls the scheduler
  * in interrupt context which is a big NO! NO!.
  **/
-static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
-                    void (*done)(struct scsi_cmnd *))
+static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 {
-    struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+    struct scsi_qla_host *ha = to_qla_host(host);
     struct ddb_entry *ddb_entry = cmd->device->hostdata;
     struct iscsi_cls_session *sess = ddb_entry->sess;
     struct srb *srb;
@@ -515,37 +512,29 @@ static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd,
         test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
         goto qc_host_busy;
 
-    spin_unlock_irq(ha->host->host_lock);
-
-    srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
+    srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
     if (!srb)
-        goto qc_host_busy_lock;
+        goto qc_host_busy;
 
     rval = qla4xxx_send_command_to_isp(ha, srb);
     if (rval != QLA_SUCCESS)
         goto qc_host_busy_free_sp;
 
-    spin_lock_irq(ha->host->host_lock);
     return 0;
 
 qc_host_busy_free_sp:
     qla4xxx_srb_free_dma(ha, srb);
     mempool_free(srb, ha->srb_mempool);
 
-qc_host_busy_lock:
-    spin_lock_irq(ha->host->host_lock);
-
 qc_host_busy:
     return SCSI_MLQUEUE_HOST_BUSY;
 
 qc_fail_command:
-    done(cmd);
+    cmd->scsi_done(cmd);
 
     return 0;
 }
 
-static DEF_SCSI_QCMD(qla4xxx_queuecommand)
-
 /**
  * qla4xxx_mem_free - frees memory allocated to adapter
  * @ha: Pointer to host adapter structure.
@@ -679,7 +668,27 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
         if (ha->seconds_since_last_heartbeat == 2) {
             ha->seconds_since_last_heartbeat = 0;
             halt_status = qla4_8xxx_rd_32(ha,
                               QLA82XX_PEG_HALT_STATUS1);
+
+            ql4_printk(KERN_INFO, ha,
+                   "scsi(%ld): %s, Dumping hw/fw registers:\n "
+                   " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
+                   " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
+                   " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
+                   " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
+                   ha->host_no, __func__, halt_status,
+                   qla4_8xxx_rd_32(ha,
+                           QLA82XX_PEG_HALT_STATUS2),
+                   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
+                           0x3c),
+                   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
+                           0x3c),
+                   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
+                           0x3c),
+                   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
+                           0x3c),
+                   qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
+                           0x3c));
 
             /* Since we cannot change dev_state in interrupt
              * context, set appropriate DPC flag then wakeup
@@ -715,7 +724,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
     /* don't poll if reset is going on */
     if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
         test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
-        test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) {
+        test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
         if (dev_state == QLA82XX_DEV_NEED_RESET &&
             !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
             if (!ql4xdontresethba) {
@@ -839,7 +848,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
     }
 
     /* Wakeup the dpc routine for this adapter, if needed. */
-    if ((start_dpc ||
+    if (start_dpc ||
         test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
         test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
         test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
@@ -849,9 +858,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
         test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
         test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
         test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
-        test_bit(DPC_AEN, &ha->dpc_flags)) &&
-        !test_bit(AF_DPC_SCHEDULED, &ha->flags) &&
-        ha->dpc_thread) {
+        test_bit(DPC_AEN, &ha->dpc_flags)) {
         DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
                   " - dpc flags = 0x%lx\n",
                   ha->host_no, __func__, ha->dpc_flags));
@@ -1241,11 +1248,8 @@ static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
 
 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
 {
-    if (ha->dpc_thread &&
-        !test_bit(AF_DPC_SCHEDULED, &ha->flags)) {
-        set_bit(AF_DPC_SCHEDULED, &ha->flags);
+    if (ha->dpc_thread)
         queue_work(ha->dpc_thread, &ha->dpc_work);
-    }
 }
 
 /**
@@ -1272,12 +1276,12 @@ static void qla4xxx_do_dpc(struct work_struct *work)
 
     /* Initialization not yet finished. Don't do anything yet. */
     if (!test_bit(AF_INIT_DONE, &ha->flags))
-        goto do_dpc_exit;
+        return;
 
     if (test_bit(AF_EEH_BUSY, &ha->flags)) {
         DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
             ha->host_no, __func__, ha->flags));
-        goto do_dpc_exit;
+        return;
     }
 
     if (is_qla8022(ha)) {
@@ -1384,8 +1388,6 @@ dpc_post_reset_ha:
         }
     }
 
-do_dpc_exit:
-    clear_bit(AF_DPC_SCHEDULED, &ha->flags);
 }
 
 /**
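Note: dropping DEF_SCSI_QCMD converts qla4xxx to the lock-free queuecommand convention: that macro used to generate a wrapper that acquired shost->host_lock around the _lck variant, so with it gone the midlayer calls qla4xxx_queuecommand() with no host lock held. That is why the spin_unlock_irq()/spin_lock_irq() pairs and the qc_host_busy_lock label disappear, why the done callback parameter is replaced by the midlayer-installed cmd->scsi_done, and why the Scsi_Host pointer now arrives as an explicit argument.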
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 603155769407..610492877253 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION    "5.02.00-k6"
+#define QLA4XXX_DRIVER_VERSION    "5.02.00-k7"
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index abea2cf05c2e..a4b9cdbaaa0b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,6 +50,8 @@
 #define BUS_RESET_SETTLE_TIME   (10)
 #define HOST_RESET_SETTLE_TIME  (10)
 
+static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
+
 /* called with shost->host_lock held */
 void scsi_eh_wakeup(struct Scsi_Host *shost)
 {
@@ -947,6 +949,48 @@ retry_tur:
 }
 
 /**
+ * scsi_eh_test_devices - check if devices are responding from error recovery.
+ * @cmd_list:    scsi commands in error recovery.
+ * @work_q:      queue for commands which still need more error recovery
+ * @done_q:      queue for commands which are finished
+ * @try_stu:     boolean on if a STU command should be tried in addition to TUR.
+ *
+ * Decription:
+ *    Tests if devices are in a working state.  Commands to devices now in
+ *    a working state are sent to the done_q while commands to devices which
+ *    are still failing to respond are returned to the work_q for more
+ *    processing.
+ **/
+static int scsi_eh_test_devices(struct list_head *cmd_list,
+                struct list_head *work_q,
+                struct list_head *done_q, int try_stu)
+{
+    struct scsi_cmnd *scmd, *next;
+    struct scsi_device *sdev;
+    int finish_cmds;
+
+    while (!list_empty(cmd_list)) {
+        scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
+        sdev = scmd->device;
+
+        finish_cmds = !scsi_device_online(scmd->device) ||
+            (try_stu && !scsi_eh_try_stu(scmd) &&
+             !scsi_eh_tur(scmd)) ||
+            !scsi_eh_tur(scmd);
+
+        list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
+            if (scmd->device == sdev) {
+                if (finish_cmds)
+                    scsi_eh_finish_cmd(scmd, done_q);
+                else
+                    list_move_tail(&scmd->eh_entry, work_q);
+            }
+    }
+    return list_empty(work_q);
+}
+
+
+/**
  * scsi_eh_abort_cmds - abort pending commands.
  * @work_q:    &list_head for pending commands.
  * @done_q:    &list_head for processed commands.
@@ -962,6 +1006,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
                   struct list_head *done_q)
 {
     struct scsi_cmnd *scmd, *next;
+    LIST_HEAD(check_list);
     int rtn;
 
     list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
@@ -973,11 +1018,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
 		rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd);
 		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
 			scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
-			if (!scsi_device_online(scmd->device) ||
-			    rtn == FAST_IO_FAIL ||
-			    !scsi_eh_tur(scmd)) {
-				scsi_eh_finish_cmd(scmd, done_q);
-			}
+			if (rtn == FAST_IO_FAIL)
+				scsi_eh_finish_cmd(scmd, done_q);
+			else
+				list_move_tail(&scmd->eh_entry, &check_list);
 		} else
 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
 							  " cmd failed:"
@@ -986,7 +1030,7 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
 							  scmd));
 	}
 
-	return list_empty(work_q);
+	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
 }
 
 /**
@@ -1137,6 +1181,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
 				 struct list_head *done_q)
 {
 	LIST_HEAD(tmp_list);
+	LIST_HEAD(check_list);
 
 	list_splice_init(work_q, &tmp_list);
 
@@ -1161,9 +1206,9 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
 			if (scmd_id(scmd) != id)
 				continue;
 
-			if ((rtn == SUCCESS || rtn == FAST_IO_FAIL)
-			    && (!scsi_device_online(scmd->device) ||
-				rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd)))
-				scsi_eh_finish_cmd(scmd, done_q);
+			if (rtn == SUCCESS)
+				list_move_tail(&scmd->eh_entry, &check_list);
+			else if (rtn == FAST_IO_FAIL)
+				scsi_eh_finish_cmd(scmd, done_q);
 			else
 				/* push back on work queue for further processing */
@@ -1171,7 +1216,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
 		}
 	}
 
-	return list_empty(work_q);
+	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
 }
 
 /**
@@ -1185,6 +1230,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
 			     struct list_head *done_q)
 {
 	struct scsi_cmnd *scmd, *chan_scmd, *next;
+	LIST_HEAD(check_list);
 	unsigned int channel;
 	int rtn;
 
@@ -1216,12 +1262,14 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
 		rtn = scsi_try_bus_reset(chan_scmd);
 		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
 			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
-				if (channel == scmd_channel(scmd))
-					if (!scsi_device_online(scmd->device) ||
-					    rtn == FAST_IO_FAIL ||
-					    !scsi_eh_tur(scmd))
-						scsi_eh_finish_cmd(scmd,
-								   done_q);
+				if (channel == scmd_channel(scmd)) {
+					if (rtn == FAST_IO_FAIL)
+						scsi_eh_finish_cmd(scmd,
+								   done_q);
+					else
+						list_move_tail(&scmd->eh_entry,
+							       &check_list);
+				}
 			}
 		} else {
 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
@@ -1230,7 +1278,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
 							  channel));
 		}
 	}
-	return list_empty(work_q);
+	return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
 }
 
 /**
@@ -1242,6 +1290,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
 			      struct list_head *done_q)
 {
 	struct scsi_cmnd *scmd, *next;
+	LIST_HEAD(check_list);
 	int rtn;
 
 	if (!list_empty(work_q)) {
@@ -1252,12 +1301,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
 						  , current->comm));
 
 		rtn = scsi_try_host_reset(scmd);
-		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
-			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
-				if (!scsi_device_online(scmd->device) ||
-				    rtn == FAST_IO_FAIL ||
-				    (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
-				    !scsi_eh_tur(scmd))
-					scsi_eh_finish_cmd(scmd, done_q);
+		if (rtn == SUCCESS) {
+			list_splice_init(work_q, &check_list);
+		} else if (rtn == FAST_IO_FAIL) {
+			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
+					scsi_eh_finish_cmd(scmd, done_q);
 			}
 		} else {
@@ -1266,7 +1313,7 @@ static int scsi_eh_host_reset(struct list_head *work_q,
 						  current->comm));
 		}
 	}
-	return list_empty(work_q);
+	return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
 }
 
 /**
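The scsi_error.c changes above stop running Test Unit Ready inline in each abort/reset loop: commands whose recovery action succeeded are collected on a check_list and handed to the new scsi_eh_test_devices() helper, which probes each affected device once and then finishes or requeues all of that device's commands together. A minimal userspace sketch of that pattern, assuming an invented cmd struct and a device_responds() stand-in for scsi_eh_tur():

    /* Userspace sketch only -- not kernel code. One probe per device
     * decides the fate of every command queued against it. */
    #include <stdbool.h>
    #include <stdio.h>

    struct cmd { int dev; bool done; };

    /* stand-in for scsi_eh_tur(): does the device answer a TUR? */
    static bool device_responds(int dev)
    {
    	return dev != 2;	/* pretend device 2 is still dead */
    }

    static void test_devices(struct cmd *cmds, int n)
    {
    	bool probed[16] = { false };	/* indexed by device id, sketch only */

    	for (int i = 0; i < n; i++) {
    		int dev = cmds[i].dev;

    		if (probed[dev])
    			continue;
    		probed[dev] = true;

    		bool ok = device_responds(dev);		/* one probe per device */
    		for (int j = 0; j < n; j++)
    			if (cmds[j].dev == dev)
    				cmds[j].done = ok;	/* ok -> done_q, else back to work_q */
    	}
    }

    int main(void)
    {
    	struct cmd cmds[] = { { 1, false }, { 2, false }, { 1, false } };

    	test_devices(cmds, 3);
    	for (int i = 0; i < 3; i++)
    		printf("cmd %d (dev %d): %s\n", i, cmds[i].dev,
    		       cmds[i].done ? "finished" : "requeued");
    	return 0;
    }

The point of the helper is that a single TUR (or STU plus TUR) per device, rather than one per command, settles the disposition of everything queued against that device.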
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index b587289cfacb..2bea4f0b684a 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -59,6 +59,10 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
 	trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
 			 (unsigned long long)lba, (unsigned long long)txlen,
 			 cdb[1] >> 5);
+
+	if (cdb[0] == WRITE_SAME)
+		trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
 	trace_seq_putc(p, 0);
 
 	return ret;
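The scsi_trace.c hunk above decodes the UNMAP flag for WRITE SAME(10). A self-contained sketch of the same extraction, with an invented CDB; 0x41 is the kernel's WRITE_SAME opcode, and per SBC the UNMAP flag sits in bit 3 of CDB byte 1:

    #include <stdio.h>

    #define WRITE_SAME_OPCODE 0x41	/* matches scsi.h's WRITE_SAME */

    int main(void)
    {
    	unsigned char cdb[10] = { WRITE_SAME_OPCODE, 0x08 /* UNMAP=1 */ };

    	if (cdb[0] == WRITE_SAME_OPCODE)
    		printf("unmap=%u\n", cdb[1] >> 3 & 1);	/* prints unmap=1 */
    	return 0;
    }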
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bd0806e64e85..953773cb26d9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -490,7 +490,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	unsigned int max_blocks = 0;
 
 	q->limits.discard_zeroes_data = sdkp->lbprz;
-	q->limits.discard_alignment = sdkp->unmap_alignment;
+	q->limits.discard_alignment = sdkp->unmap_alignment *
+		logical_block_size;
 	q->limits.discard_granularity =
 		max(sdkp->physical_block_size,
 		    sdkp->unmap_granularity * logical_block_size);
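The hunk above is a unit conversion: sdkp->unmap_alignment comes from the Block Limits VPD page in logical blocks, while the block layer's discard_alignment is expected in bytes. A two-line illustration with made-up values (an alignment of one 512-byte block should surface as 512, not 1):

    #include <stdio.h>

    int main(void)
    {
    	unsigned int logical_block_size = 512;	/* made-up values */
    	unsigned int unmap_alignment = 1;	/* in logical blocks */

    	/* before the fix the queue saw 1; it should see bytes: */
    	printf("discard_alignment = %u bytes\n",
    	       unmap_alignment * logical_block_size);	/* 512 */
    	return 0;
    }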
@@ -2021,16 +2022,26 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 
 	int dbd;
 	int modepage;
+	int first_len;
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
 	int old_wce = sdkp->WCE;
 	int old_rcd = sdkp->RCD;
 	int old_dpofua = sdkp->DPOFUA;
 
-	if (sdp->skip_ms_page_8)
-		goto defaults;
-
-	if (sdp->type == TYPE_RBC) {
+	first_len = 4;
+	if (sdp->skip_ms_page_8) {
+		if (sdp->type == TYPE_RBC)
+			goto defaults;
+		else {
+			if (sdp->skip_ms_page_3f)
+				goto defaults;
+			modepage = 0x3F;
+			if (sdp->use_192_bytes_for_3f)
+				first_len = 192;
+			dbd = 0;
+		}
+	} else if (sdp->type == TYPE_RBC) {
 		modepage = 6;
 		dbd = 8;
 	} else {
@@ -2039,13 +2050,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 	}
 
 	/* cautiously ask */
-	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
+	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
+			       &data, &sshdr);
 
 	if (!scsi_status_is_good(res))
 		goto bad_sense;
 
 	if (!data.header_length) {
 		modepage = 6;
+		first_len = 0;
 		sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
 	}
 
@@ -2058,30 +2071,61 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
 	 */
 	if (len < 3)
 		goto bad_sense;
-	if (len > 20)
-		len = 20;
-
-	/* Take headers and block descriptors into account */
-	len += data.header_length + data.block_descriptor_length;
-	if (len > SD_BUF_SIZE)
-		goto bad_sense;
+	else if (len > SD_BUF_SIZE) {
+		sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+			  "data from %d to %d bytes\n", len, SD_BUF_SIZE);
+		len = SD_BUF_SIZE;
+	}
+	if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
+		len = 192;
 
 	/* Get the data */
-	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
+	if (len > first_len)
+		res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
+				       &data, &sshdr);
 
 	if (scsi_status_is_good(res)) {
 		int offset = data.header_length + data.block_descriptor_length;
 
-		if (offset >= SD_BUF_SIZE - 2) {
-			sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
-			goto defaults;
+		while (offset < len) {
+			u8 page_code = buffer[offset] & 0x3F;
+			u8 spf       = buffer[offset] & 0x40;
+
+			if (page_code == 8 || page_code == 6) {
+				/* We're interested only in the first 3 bytes.
+				 */
+				if (len - offset <= 2) {
+					sd_printk(KERN_ERR, sdkp, "Incomplete "
+						  "mode parameter data\n");
+					goto defaults;
+				} else {
+					modepage = page_code;
+					goto Page_found;
+				}
+			} else {
+				/* Go to the next page */
+				if (spf && len - offset > 3)
+					offset += 4 + (buffer[offset+2] << 8) +
+						  buffer[offset+3];
+				else if (!spf && len - offset > 1)
+					offset += 2 + buffer[offset+1];
+				else {
+					sd_printk(KERN_ERR, sdkp, "Incomplete "
+						  "mode parameter data\n");
+					goto defaults;
+				}
+			}
 		}
 
-		if ((buffer[offset] & 0x3f) != modepage) {
+		if (modepage == 0x3F) {
+			sd_printk(KERN_ERR, sdkp, "No Caching mode page "
+				  "present\n");
+			goto defaults;
+		} else if ((buffer[offset] & 0x3f) != modepage) {
 			sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
 			goto defaults;
 		}
-
+	Page_found:
 		if (modepage == 8) {
 			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
 			sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
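The reworked sd_read_cache_type() above can now ask for all mode pages (page code 0x3F) when page 8 must be skipped, so it has to walk the returned list for the Caching page. A userspace sketch of that walk over an invented buffer; the SPF bit (0x40) selects the 4-byte subpage header stride, otherwise the 2-byte page header applies:

    #include <stdio.h>

    int main(void)
    {
    	/* invented data: page 0x1C with 2 payload bytes, then page 8 */
    	unsigned char buffer[] = { 0x1C, 0x02, 0x00, 0x00,
    				   0x08, 0x12, 0x04, 0x00 };
    	int len = sizeof(buffer), offset = 0;

    	while (offset < len) {
    		unsigned char page_code = buffer[offset] & 0x3F;
    		unsigned char spf = buffer[offset] & 0x40;

    		if (page_code == 8 || page_code == 6) {
    			printf("found page 0x%02x at offset %d\n",
    			       page_code, offset);
    			return 0;
    		}
    		/* advance past this page's header and payload */
    		if (spf && len - offset > 3)
    			offset += 4 + (buffer[offset + 2] << 8) +
    				  buffer[offset + 3];
    		else if (!spf && len - offset > 1)
    			offset += 2 + buffer[offset + 1];
    		else
    			break;	/* truncated mode parameter data */
    	}
    	printf("caching page not present\n");
    	return 0;
    }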
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 9f4b58b7daad..7e22b737dfd8 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -307,7 +307,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
307 "0: bsfw %1,%w0\n\t" 307 "0: bsfw %1,%w0\n\t"
308 "btr %0,%1\n\t" 308 "btr %0,%1\n\t"
309 "jnc 0b" 309 "jnc 0b"
310 : "=&r" (rv), "=m" (*field) :); 310 : "=&r" (rv), "+m" (*field) :);
311 311
312 return rv; 312 return rv;
313} 313}
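The ultrastor.c one-liner corrects an asm constraint: the bsfw/btr loop reads *field as well as writing it, and "=m" wrongly declared the memory write-only, licensing the compiler to treat the previous contents as dead. A compilable x86 sketch of the corrected snippet (the test value in main is invented):

    #include <stdio.h>

    /* "+m" marks *field as a read-write memory operand */
    static inline int find_and_clear_bit_16(unsigned long *field)
    {
    	int rv;

    	__asm__("0: bsfw %1,%w0\n\t"	/* find lowest set bit */
    		"btr %0,%1\n\t"		/* clear it */
    		"jnc 0b"		/* retry if it raced away */
    		: "=&r" (rv), "+m" (*field) :);
    	return rv;
    }

    int main(void)
    {
    	unsigned long field = 0x28;	/* bits 3 and 5 set */

    	printf("cleared bit %d, field now %#lx\n",
    	       find_and_clear_bit_16(&field), field);	/* bit 3, 0x20 */
    	return 0;
    }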